diff --git a/go.mod b/go.mod
index 86c1b1c0f..6a81dfe58 100644
--- a/go.mod
+++ b/go.mod
@@ -5,10 +5,10 @@ require (
cloud.google.com/go v0.37.4 // indirect
contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect
github.com/Azure/azure-sdk-for-go v27.3.0+incompatible
- github.com/Azure/go-autorest v12.0.0+incompatible // indirect
+ github.com/Azure/go-autorest v13.3.0+incompatible // indirect
+ github.com/Azure/go-autorest/autorest v0.9.2 // indirect
github.com/cenkalti/backoff v2.1.1+incompatible
github.com/cpuguy83/go-md2man v1.0.10 // indirect
- github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/dnaeon/go-vcr v1.0.1 // indirect
github.com/elithrar/simple-scrypt v1.3.0
github.com/go-ini/ini v1.42.0 // indirect
@@ -49,3 +49,5 @@ require (
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637
gopkg.in/yaml.v2 v2.2.2 // indirect
)
+
+go 1.13
diff --git a/go.sum b/go.sum
index ba8a6da78..4a51755bb 100644
--- a/go.sum
+++ b/go.sum
@@ -10,6 +10,21 @@ github.com/Azure/azure-sdk-for-go v27.3.0+incompatible h1:i+ROfG3CsZUPoVAnhK06T3
github.com/Azure/azure-sdk-for-go v27.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v13.3.0+incompatible h1:8Ix0VdeOllBx9jEcZ2Wb1uqWUpE1awmJiaHztwaJCPk=
+github.com/Azure/go-autorest v13.3.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=
+github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml
deleted file mode 100644
index ee417bbe6..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-go:
- - 1.11.x
-
-go_import_path: contrib.go.opencensus.io/exporter/ocagent
-
-before_script:
- - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
- - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
-
-script:
- - go build ./... # Ensure dependency updates don't break build
- - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
- - go vet ./...
- - GO111MODULE=on go test -v -race $PKGS # Run all the tests with the race detector enabled
- - GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules.
- - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
deleted file mode 100644
index 0786fdf43..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# How to contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution,
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult [GitHub Help] for more
-information on using pull requests.
-
-[GitHub Help]: https://help.github.com/articles/about-pull-requests/
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
deleted file mode 100644
index 3b9e908f5..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# OpenCensus Agent Go Exporter
-
-[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]
-
-
-This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
-OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from
-OpenCensus Library, export them to other backends and possibly push configurations back to
-Library. See more details on [OC-Agent Readme][OCAgentReadme].
-
-Note: This is an experimental repository and is likely to get backwards-incompatible changes.
-Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo].
-
-## Installation
-
-```bash
-$ go get -u contrib.go.opencensus.io/exporter/ocagent
-```
-
-## Usage
-
-```go
-import (
- "context"
- "fmt"
- "log"
- "time"
-
- "contrib.go.opencensus.io/exporter/ocagent"
- "go.opencensus.io/trace"
-)
-
-func Example() {
- exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
- if err != nil {
- log.Fatalf("Failed to create the agent exporter: %v", err)
- }
- defer exp.Stop()
-
- // Now register it as a trace exporter.
- trace.RegisterExporter(exp)
-
- // Then use the OpenCensus tracing library, like we normally would.
- ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
- defer span.End()
-
- for i := 0; i < 10; i++ {
- _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
- <-time.After(6 * time.Millisecond)
- iSpan.End()
- }
-}
-```
-
-[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
-[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
-[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
-[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
-[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
-[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent
-
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
deleted file mode 100644
index 297e44b6e..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "math/rand"
- "time"
-)
-
-var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-// retries function fn upto n times, if fn returns an error lest it returns nil early.
-// It applies exponential backoff in units of (1< 0 {
- ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
- }
- traceExporter, err := traceSvcClient.Export(ctx)
- if err != nil {
- return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
- }
-
- firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
- Node: node,
- Resource: ae.resource,
- }
- if err := traceExporter.Send(firstTraceMessage); err != nil {
- return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
- }
-
- ae.mu.Lock()
- ae.traceExporter = traceExporter
- ae.mu.Unlock()
-
- // Initiate the config service by sending over node identifier info.
- configStream, err := traceSvcClient.Config(context.Background())
- if err != nil {
- return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
- }
- firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
- if err := configStream.Send(firstCfgMessage); err != nil {
- return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
- }
-
- // In the background, handle trace configurations that are beamed down
- // by the agent, but also reply to it with the applied configuration.
- go ae.handleConfigStreaming(configStream)
-
- return nil
-}
-
-func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
- metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
- metricsExporter, err := metricsSvcClient.Export(context.Background())
- if err != nil {
- return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
- }
- // Initiate the metrics service by sending over the first message just containing the Node and Resource.
- firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
- Node: node,
- Resource: ae.resource,
- }
- if err := metricsExporter.Send(firstMetricsMessage); err != nil {
- return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
- }
-
- ae.mu.Lock()
- ae.metricsExporter = metricsExporter
- ae.mu.Unlock()
-
- // With that we are good to go and can start sending metrics
- return nil
-}
-
-func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
- addr := ae.prepareAgentAddress()
- var dialOpts []grpc.DialOption
- if ae.clientTransportCredentials != nil {
- dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
- } else if ae.canDialInsecure {
- dialOpts = append(dialOpts, grpc.WithInsecure())
- }
- if ae.compressor != "" {
- dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
- }
- dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
-
- ctx := context.Background()
- if len(ae.headers) > 0 {
- ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
- }
- return grpc.DialContext(ctx, addr, dialOpts...)
-}
-
-func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
- // Note: We haven't yet implemented configuration sending so we
- // should NOT be changing connection states within this function for now.
- for {
- recv, err := configStream.Recv()
- if err != nil {
- // TODO: Check if this is a transient error or exponential backoff-able.
- return err
- }
- cfg := recv.Config
- if cfg == nil {
- continue
- }
-
- // Otherwise now apply the trace configuration sent down from the agent
- if psamp := cfg.GetProbabilitySampler(); psamp != nil {
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
- } else if csamp := cfg.GetConstantSampler(); csamp != nil {
- alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
- if alwaysSample {
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
- } else {
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
- }
- } else { // TODO: Add the rate limiting sampler here
- }
-
- // Then finally send back to upstream the newly applied configuration
- err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
- if err != nil {
- return err
- }
- }
-}
-
-// Stop shuts down all the connections and resources
-// related to the exporter.
-func (ae *Exporter) Stop() error {
- ae.mu.RLock()
- cc := ae.grpcClientConn
- started := ae.started
- stopped := ae.stopped
- ae.mu.RUnlock()
-
- if !started {
- return errNotStarted
- }
- if stopped {
- // TODO: tell the user that we've already stopped, so perhaps a sentinel error?
- return nil
- }
-
- ae.Flush()
-
- // Now close the underlying gRPC connection.
- var err error
- if cc != nil {
- err = cc.Close()
- }
-
- // At this point we can change the state variables: started and stopped
- ae.mu.Lock()
- ae.started = false
- ae.stopped = true
- ae.mu.Unlock()
- close(ae.stopCh)
-
- // Ensure that the backgroundConnector returns
- <-ae.backgroundConnectionDoneCh
-
- return err
-}
-
-func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
- if sd == nil {
- return
- }
- _ = ae.traceBundler.Add(sd, 1)
-}
-
-func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
- if batch == nil || len(batch.Spans) == 0 {
- return nil
- }
-
- select {
- case <-ae.stopCh:
- return errStopped
-
- default:
- if !ae.connected() {
- return errNoConnection
- }
-
- ae.senderMu.Lock()
- err := ae.traceExporter.Send(batch)
- ae.senderMu.Unlock()
- if err != nil {
- ae.setStateDisconnected()
- return err
- }
- return nil
- }
-}
-
-func (ae *Exporter) ExportView(vd *view.Data) {
- if vd == nil {
- return
- }
- _ = ae.viewDataBundler.Add(vd, 1)
-}
-
-func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span {
- if len(sdl) == 0 {
- return nil
- }
- protoSpans := make([]*tracepb.Span, 0, len(sdl))
- for _, sd := range sdl {
- if sd != nil {
- protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
- }
- }
- return protoSpans
-}
-
-func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
- select {
- case <-ae.stopCh:
- return
-
- default:
- if !ae.connected() {
- return
- }
-
- protoSpans := ocSpanDataToPbSpans(sdl)
- if len(protoSpans) == 0 {
- return
- }
- ae.senderMu.Lock()
- err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
- Spans: protoSpans,
- })
- ae.senderMu.Unlock()
- if err != nil {
- ae.setStateDisconnected()
- }
- }
-}
-
-func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric {
- if len(vdl) == 0 {
- return nil
- }
- metrics := make([]*metricspb.Metric, 0, len(vdl))
- for _, vd := range vdl {
- if vd != nil {
- vmetric, err := viewDataToMetric(vd)
- // TODO: (@odeke-em) somehow report this error, if it is non-nil.
- if err == nil && vmetric != nil {
- metrics = append(metrics, vmetric)
- }
- }
- }
- return metrics
-}
-
-func (ae *Exporter) uploadViewData(vdl []*view.Data) {
- select {
- case <-ae.stopCh:
- return
-
- default:
- if !ae.connected() {
- return
- }
-
- protoMetrics := ocViewDataToPbMetrics(vdl)
- if len(protoMetrics) == 0 {
- return
- }
- err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{
- Metrics: protoMetrics,
- // TODO:(@odeke-em)
- // a) Figure out how to derive a Node from the environment
- // b) Figure out how to derive a Resource from the environment
- // or better letting users of the exporter configure it.
- })
- if err != nil {
- ae.setStateDisconnected()
- }
- }
-}
-
-func (ae *Exporter) Flush() {
- ae.traceBundler.Flush()
- ae.viewDataBundler.Flush()
-}
-
-func resourceProtoFromEnv() *resourcepb.Resource {
- rs, _ := resource.FromEnv(context.Background())
- if rs == nil {
- return nil
- }
-
- rprs := &resourcepb.Resource{
- Type: rs.Type,
- }
- if rs.Labels != nil {
- rprs.Labels = make(map[string]string)
- for k, v := range rs.Labels {
- rprs.Labels[k] = v
- }
- }
- return rprs
-}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
deleted file mode 100644
index 3e05ae8b3..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "time"
-
- "google.golang.org/grpc/credentials"
-)
-
-const (
- DefaultAgentPort uint16 = 55678
- DefaultAgentHost string = "localhost"
-)
-
-type ExporterOption interface {
- withExporter(e *Exporter)
-}
-
-type insecureGrpcConnection int
-
-var _ ExporterOption = (*insecureGrpcConnection)(nil)
-
-func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
- e.canDialInsecure = true
-}
-
-// WithInsecure disables client transport security for the exporter's gRPC connection
-// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
-// does. Note, by default, client security is required unless WithInsecure is used.
-func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
-
-type addressSetter string
-
-func (as addressSetter) withExporter(e *Exporter) {
- e.agentAddress = string(as)
-}
-
-var _ ExporterOption = (*addressSetter)(nil)
-
-// WithAddress allows one to set the address that the exporter will
-// connect to the agent on. If unset, it will instead try to use
-// connect to DefaultAgentHost:DefaultAgentPort
-func WithAddress(addr string) ExporterOption {
- return addressSetter(addr)
-}
-
-type serviceNameSetter string
-
-func (sns serviceNameSetter) withExporter(e *Exporter) {
- e.serviceName = string(sns)
-}
-
-var _ ExporterOption = (*serviceNameSetter)(nil)
-
-// WithServiceName allows one to set/override the service name
-// that the exporter will report to the agent.
-func WithServiceName(serviceName string) ExporterOption {
- return serviceNameSetter(serviceName)
-}
-
-type reconnectionPeriod time.Duration
-
-func (rp reconnectionPeriod) withExporter(e *Exporter) {
- e.reconnectionPeriod = time.Duration(rp)
-}
-
-func WithReconnectionPeriod(rp time.Duration) ExporterOption {
- return reconnectionPeriod(rp)
-}
-
-type compressorSetter string
-
-func (c compressorSetter) withExporter(e *Exporter) {
- e.compressor = string(c)
-}
-
-// UseCompressor will set the compressor for the gRPC client to use when sending requests.
-// It is the responsibility of the caller to ensure that the compressor set has been registered
-// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
-// compressors auto-register on import, such as gzip, which can be registered by calling
-// `import _ "google.golang.org/grpc/encoding/gzip"`
-func UseCompressor(compressorName string) ExporterOption {
- return compressorSetter(compressorName)
-}
-
-type headerSetter map[string]string
-
-func (h headerSetter) withExporter(e *Exporter) {
- e.headers = map[string]string(h)
-}
-
-// WithHeaders will send the provided headers when the gRPC stream connection
-// is instantiated
-func WithHeaders(headers map[string]string) ExporterOption {
- return headerSetter(headers)
-}
-
-type clientCredentials struct {
- credentials.TransportCredentials
-}
-
-var _ ExporterOption = (*clientCredentials)(nil)
-
-// WithTLSCredentials allows the connection to use TLS credentials
-// when talking to the server. It takes in grpc.TransportCredentials instead
-// of say a Certificate file or a tls.Certificate, because the retrieving
-// these credentials can be done in many ways e.g. plain file, in code tls.Config
-// or by certificate rotation, so it is up to the caller to decide what to use.
-func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
- return &clientCredentials{TransportCredentials: creds}
-}
-
-func (cc *clientCredentials) withExporter(e *Exporter) {
- e.clientTransportCredentials = cc.TransportCredentials
-}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
deleted file mode 100644
index 983ebe7b7..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "math"
- "time"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/tracestate"
-
- tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
- "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-const (
- maxAnnotationEventsPerSpan = 32
- maxMessageEventsPerSpan = 128
-)
-
-func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
- if sd == nil {
- return nil
- }
- var namePtr *tracepb.TruncatableString
- if sd.Name != "" {
- namePtr = &tracepb.TruncatableString{Value: sd.Name}
- }
- return &tracepb.Span{
- TraceId: sd.TraceID[:],
- SpanId: sd.SpanID[:],
- ParentSpanId: sd.ParentSpanID[:],
- Status: ocStatusToProtoStatus(sd.Status),
- StartTime: timeToTimestamp(sd.StartTime),
- EndTime: timeToTimestamp(sd.EndTime),
- Links: ocLinksToProtoLinks(sd.Links),
- Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
- Name: namePtr,
- Attributes: ocAttributesToProtoAttributes(sd.Attributes),
- TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents),
- Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
- }
-}
-
-var blankStatus trace.Status
-
-func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
- if status == blankStatus {
- return nil
- }
- return &tracepb.Status{
- Code: status.Code,
- Message: status.Message,
- }
-}
-
-func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
- if len(links) == 0 {
- return nil
- }
-
- sl := make([]*tracepb.Span_Link, 0, len(links))
- for _, ocLink := range links {
- // This redefinition is necessary to prevent ocLink.*ID[:] copies
- // being reused -- in short we need a new ocLink per iteration.
- ocLink := ocLink
-
- sl = append(sl, &tracepb.Span_Link{
- TraceId: ocLink.TraceID[:],
- SpanId: ocLink.SpanID[:],
- Type: ocLinkTypeToProtoLinkType(ocLink.Type),
- })
- }
-
- return &tracepb.Span_Links{
- Link: sl,
- }
-}
-
-func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
- switch oct {
- case trace.LinkTypeChild:
- return tracepb.Span_Link_CHILD_LINKED_SPAN
- case trace.LinkTypeParent:
- return tracepb.Span_Link_PARENT_LINKED_SPAN
- default:
- return tracepb.Span_Link_TYPE_UNSPECIFIED
- }
-}
-
-func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
- if len(attrs) == 0 {
- return nil
- }
- outMap := make(map[string]*tracepb.AttributeValue)
- for k, v := range attrs {
- switch v := v.(type) {
- case bool:
- outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
-
- case int:
- outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
-
- case int64:
- outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
-
- case string:
- outMap[k] = &tracepb.AttributeValue{
- Value: &tracepb.AttributeValue_StringValue{
- StringValue: &tracepb.TruncatableString{Value: v},
- },
- }
- }
- }
- return &tracepb.Span_Attributes{
- AttributeMap: outMap,
- }
-}
-
-// This code is mostly copied from
-// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
-func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents {
- if len(as) == 0 && len(es) == 0 {
- return nil
- }
-
- timeEvents := &tracepb.Span_TimeEvents{}
- var annotations, droppedAnnotationsCount int
- var messageEvents, droppedMessageEventsCount int
-
- // Transform annotations
- for i, a := range as {
- if annotations >= maxAnnotationEventsPerSpan {
- droppedAnnotationsCount = len(as) - i
- break
- }
- annotations++
- timeEvents.TimeEvent = append(timeEvents.TimeEvent,
- &tracepb.Span_TimeEvent{
- Time: timeToTimestamp(a.Time),
- Value: transformAnnotationToTimeEvent(&a),
- },
- )
- }
-
- // Transform message events
- for i, e := range es {
- if messageEvents >= maxMessageEventsPerSpan {
- droppedMessageEventsCount = len(es) - i
- break
- }
- messageEvents++
- timeEvents.TimeEvent = append(timeEvents.TimeEvent,
- &tracepb.Span_TimeEvent{
- Time: timeToTimestamp(e.Time),
- Value: transformMessageEventToTimeEvent(&e),
- },
- )
- }
-
- // Process dropped counter
- timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
- timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
-
- return timeEvents
-}
-
-func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
- return &tracepb.Span_TimeEvent_Annotation_{
- Annotation: &tracepb.Span_TimeEvent_Annotation{
- Description: &tracepb.TruncatableString{Value: a.Message},
- Attributes: ocAttributesToProtoAttributes(a.Attributes),
- },
- }
-}
-
-func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
- return &tracepb.Span_TimeEvent_MessageEvent_{
- MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
- Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
- Id: uint64(e.MessageID),
- UncompressedSize: uint64(e.UncompressedByteSize),
- CompressedSize: uint64(e.CompressedByteSize),
- },
- }
-}
-
-// clip32 clips an int to the range of an int32.
-func clip32(x int) int32 {
- if x < math.MinInt32 {
- return math.MinInt32
- }
- if x > math.MaxInt32 {
- return math.MaxInt32
- }
- return int32(x)
-}
-
-func timeToTimestamp(t time.Time) *timestamp.Timestamp {
- nanoTime := t.UnixNano()
- return ×tamp.Timestamp{
- Seconds: nanoTime / 1e9,
- Nanos: int32(nanoTime % 1e9),
- }
-}
-
-func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
- switch kind {
- case trace.SpanKindClient:
- return tracepb.Span_CLIENT
- case trace.SpanKindServer:
- return tracepb.Span_SERVER
- default:
- return tracepb.Span_SPAN_KIND_UNSPECIFIED
- }
-}
-
-func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
- if ts == nil {
- return nil
- }
- return &tracepb.Span_Tracestate{
- Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
- }
-}
-
-func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
- protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
- for _, entry := range entries {
- protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
- Key: entry.Key,
- Value: entry.Value,
- })
- }
- return protoEntries
-}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
deleted file mode 100644
index 43f18dec1..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "errors"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-
- "github.com/golang/protobuf/ptypes/timestamp"
-
- metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
-)
-
-var (
- errNilMeasure = errors.New("expecting a non-nil stats.Measure")
- errNilView = errors.New("expecting a non-nil view.View")
- errNilViewData = errors.New("expecting a non-nil view.Data")
-)
-
-func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
- if vd == nil {
- return nil, errNilViewData
- }
-
- descriptor, err := viewToMetricDescriptor(vd.View)
- if err != nil {
- return nil, err
- }
-
- timeseries, err := viewDataToTimeseries(vd)
- if err != nil {
- return nil, err
- }
-
- metric := &metricspb.Metric{
- MetricDescriptor: descriptor,
- Timeseries: timeseries,
- }
- return metric, nil
-}
-
-func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
- if v == nil {
- return nil, errNilView
- }
- if v.Measure == nil {
- return nil, errNilMeasure
- }
-
- desc := &metricspb.MetricDescriptor{
- Name: stringOrCall(v.Name, v.Measure.Name),
- Description: stringOrCall(v.Description, v.Measure.Description),
- Unit: v.Measure.Unit(),
- Type: aggregationToMetricDescriptorType(v),
- LabelKeys: tagKeysToLabelKeys(v.TagKeys),
- }
- return desc, nil
-}
-
-func stringOrCall(first string, call func() string) string {
- if first != "" {
- return first
- }
- return call()
-}
-
-type measureType uint
-
-const (
- measureUnknown measureType = iota
- measureInt64
- measureFloat64
-)
-
-func measureTypeFromMeasure(m stats.Measure) measureType {
- switch m.(type) {
- default:
- return measureUnknown
- case *stats.Float64Measure:
- return measureFloat64
- case *stats.Int64Measure:
- return measureInt64
- }
-}
-
-func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
- if v == nil || v.Aggregation == nil {
- return metricspb.MetricDescriptor_UNSPECIFIED
- }
- if v.Measure == nil {
- return metricspb.MetricDescriptor_UNSPECIFIED
- }
-
- switch v.Aggregation.Type {
- case view.AggTypeCount:
- // Cumulative on int64
- return metricspb.MetricDescriptor_CUMULATIVE_INT64
-
- case view.AggTypeDistribution:
- // Cumulative types
- return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
-
- case view.AggTypeLastValue:
- // Gauge types
- switch measureTypeFromMeasure(v.Measure) {
- case measureFloat64:
- return metricspb.MetricDescriptor_GAUGE_DOUBLE
- case measureInt64:
- return metricspb.MetricDescriptor_GAUGE_INT64
- }
-
- case view.AggTypeSum:
- // Cumulative types
- switch measureTypeFromMeasure(v.Measure) {
- case measureFloat64:
- return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
- case measureInt64:
- return metricspb.MetricDescriptor_CUMULATIVE_INT64
- }
- }
-
- // For all other cases, return unspecified.
- return metricspb.MetricDescriptor_UNSPECIFIED
-}
-
-func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
- labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
- for _, tagKey := range tagKeys {
- labelKeys = append(labelKeys, &metricspb.LabelKey{
- Key: tagKey.Name(),
- })
- }
- return labelKeys
-}
-
-func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
- if vd == nil || len(vd.Rows) == 0 {
- return nil, nil
- }
-
- // Given that view.Data only contains Start, End
- // the timestamps for all the row data will be the exact same
- // per aggregation. However, the values will differ.
- // Each row has its own tags.
- startTimestamp := timeToProtoTimestamp(vd.Start)
- endTimestamp := timeToProtoTimestamp(vd.End)
-
- mType := measureTypeFromMeasure(vd.View.Measure)
- timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
- // It is imperative that the ordering of "LabelValues" matches those
- // of the Label keys in the metric descriptor.
- for _, row := range vd.Rows {
- labelValues := labelValuesFromTags(row.Tags)
- point := rowToPoint(vd.View, row, endTimestamp, mType)
- timeseries = append(timeseries, &metricspb.TimeSeries{
- StartTimestamp: startTimestamp,
- LabelValues: labelValues,
- Points: []*metricspb.Point{point},
- })
- }
-
- if len(timeseries) == 0 {
- return nil, nil
- }
-
- return timeseries, nil
-}
-
-func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
- unixNano := t.UnixNano()
- return ×tamp.Timestamp{
- Seconds: int64(unixNano / 1e9),
- Nanos: int32(unixNano % 1e9),
- }
-}
-
-func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
- pt := &metricspb.Point{
- Timestamp: endTimestamp,
- }
-
- switch data := row.Data.(type) {
- case *view.CountData:
- pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
-
- case *view.DistributionData:
- pt.Value = &metricspb.Point_DistributionValue{
- DistributionValue: &metricspb.DistributionValue{
- Count: data.Count,
- Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
- // TODO: Add Exemplar
- Buckets: bucketsToProtoBuckets(data.CountPerBucket),
- BucketOptions: &metricspb.DistributionValue_BucketOptions{
- Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
- Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
- Bounds: v.Aggregation.Buckets,
- },
- },
- },
- SumOfSquaredDeviation: data.SumOfSquaredDev,
- }}
-
- case *view.LastValueData:
- setPointValue(pt, data.Value, mType)
-
- case *view.SumData:
- setPointValue(pt, data.Value, mType)
- }
-
- return pt
-}
-
-// Not returning anything from this function because metricspb.Point.is_Value is an unexported
-// interface hence we just have to set its value by pointer.
-func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
- if mType == measureInt64 {
- pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
- } else {
- pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
- }
-}
-
-func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
- distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
- for i := 0; i < len(countPerBucket); i++ {
- count := countPerBucket[i]
-
- distBuckets[i] = &metricspb.DistributionValue_Bucket{
- Count: count,
- }
- }
-
- return distBuckets
-}
-
-func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
- if len(tags) == 0 {
- return nil
- }
-
- labelValues := make([]*metricspb.LabelValue, 0, len(tags))
- for _, tag_ := range tags {
- labelValues = append(labelValues, &metricspb.LabelValue{
- Value: tag_.Value,
-
- // It is imperative that we set the "HasValue" attribute,
- // in order to distinguish missing a label from the empty string.
- // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
- //
- // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
- // so the best case that we can use to distinguish missing labels/tags from the
- // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
- // a value.
- HasValue: tag_.Key.Name() != "",
- })
- }
- return labelValues
-}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
deleted file mode 100644
index 68be4c75b..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-const Version = "0.0.1"
diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE
similarity index 100%
rename from vendor/github.com/Azure/go-autorest/LICENSE
rename to vendor/github.com/Azure/go-autorest/autorest/LICENSE
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
similarity index 94%
rename from vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
rename to vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
index 261eeb9e9..b9d6a27ea 100644
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
@@ -1,3 +1,4 @@
+
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -175,18 +176,7 @@
END OF TERMS AND CONDITIONS
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
index 7b0c4bc4d..fec416a9c 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -135,7 +135,7 @@ resource := "https://management.core.windows.net/"
applicationSecret := "APPLICATION_SECRET"
spt, err := adal.NewServicePrincipalToken(
- oauthConfig,
+ *oauthConfig,
appliationID,
applicationSecret,
resource,
@@ -170,7 +170,7 @@ if err != nil {
}
spt, err := adal.NewServicePrincipalTokenFromCertificate(
- oauthConfig,
+ *oauthConfig,
applicationID,
certificate,
rsaPrivateKey,
@@ -195,7 +195,7 @@ oauthClient := &http.Client{}
// Acquire the device code
deviceCode, err := adal.InitiateDeviceAuth(
oauthClient,
- oauthConfig,
+ *oauthConfig,
applicationID,
resource)
if err != nil {
@@ -212,7 +212,7 @@ if err != nil {
}
spt, err := adal.NewServicePrincipalTokenFromManualToken(
- oauthConfig,
+ *oauthConfig,
applicationID,
resource,
*token,
@@ -227,7 +227,7 @@ if (err == nil) {
```Go
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
- oauthConfig,
+ *oauthConfig,
applicationID,
username,
password,
@@ -243,11 +243,11 @@ if (err == nil) {
``` Go
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
- oauthConfig,
+ *oauthConfig,
applicationID,
clientSecret,
- authorizationCode,
- redirectURI,
+ authorizationCode,
+ redirectURI,
resource,
callbacks...)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
index 8c83a917f..fa5964742 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -15,10 +15,15 @@ package adal
// limitations under the License.
import (
+ "errors"
"fmt"
"net/url"
)
+const (
+ activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
@@ -60,7 +65,6 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV
}
api = fmt.Sprintf("?api-version=%s", *apiVersion)
}
- const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
@@ -89,3 +93,59 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV
DeviceCodeEndpoint: *deviceCodeURL,
}, nil
}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+ PrimaryTenant() *OAuthConfig
+ AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+ APIVersion string
+}
+
+func (c OAuthOptions) apiVersion() string {
+ if c.APIVersion != "" {
+ return fmt.Sprintf("?api-version=%s", c.APIVersion)
+ }
+ return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
+ if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
+ return nil, errors.New("must specify one to three auxiliary tenants")
+ }
+ mtCfg := multiTenantOAuthConfig{
+ cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
+ }
+ apiVer := options.apiVersion()
+ pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
+ }
+ mtCfg.cfgs[0] = pri
+ for i := range auxiliaryTenantIDs {
+ aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
+ if err != nil {
+ return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
+ }
+ mtCfg.cfgs[i+1] = aux
+ }
+ return mtCfg, nil
+}
+
+type multiTenantOAuthConfig struct {
+ // first config in the slice is the primary tenant
+ cfgs []*OAuthConfig
+}
+
+func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
+ return m.cfgs[0]
+}
+
+func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
+ return m.cfgs[1:]
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
new file mode 100644
index 000000000..66db8b9e2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest/adal
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest/autorest/date v0.1.0
+ github.com/Azure/go-autorest/autorest/mocks v0.1.0
+ github.com/Azure/go-autorest/tracing v0.5.0
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible
+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
new file mode 100644
index 000000000..9525ff736
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
@@ -0,0 +1,12 @@
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
index 834401e00..d7e4372bb 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -15,7 +15,12 @@ package adal
// limitations under the License.
import (
+ "crypto/tls"
"net/http"
+ "net/http/cookiejar"
+ "sync"
+
+ "github.com/Azure/go-autorest/tracing"
)
const (
@@ -23,6 +28,9 @@ const (
mimeTypeFormPost = "application/x-www-form-urlencoded"
)
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
// Sender is the interface that wraps the Do method to send HTTP requests.
//
// The standard http.Client conforms to this interface.
@@ -45,7 +53,7 @@ type SendDecorator func(Sender) Sender
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
func CreateSender(decorators ...SendDecorator) Sender {
- return DecorateSender(&http.Client{}, decorators...)
+ return DecorateSender(sender(), decorators...)
}
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
@@ -58,3 +66,30 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
}
return s
}
+
+func sender() Sender {
+ // note that we can't init defaultSender in init() since it will
+ // execute before calling code has had a chance to enable tracing
+ defaultSenderInit.Do(func() {
+ // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+ defaultTransport := http.DefaultTransport.(*http.Transport)
+ transport := &http.Transport{
+ Proxy: defaultTransport.Proxy,
+ DialContext: defaultTransport.DialContext,
+ MaxIdleConns: defaultTransport.MaxIdleConns,
+ IdleConnTimeout: defaultTransport.IdleConnTimeout,
+ TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
+ ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ },
+ }
+ var roundTripper http.RoundTripper = transport
+ if tracing.IsEnabled() {
+ roundTripper = tracing.NewTransport(transport)
+ }
+ j, _ := cookiejar.New(nil)
+ defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+ })
+ return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
index effa87ab2..b72753498 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -34,7 +34,6 @@ import (
"time"
"github.com/Azure/go-autorest/autorest/date"
- "github.com/Azure/go-autorest/tracing"
"github.com/dgrijalva/jwt-go"
)
@@ -71,6 +70,12 @@ type OAuthTokenProvider interface {
OAuthToken() string
}
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+ PrimaryOAuthToken() string
+ AuxiliaryOAuthTokens() []string
+}
+
// TokenRefreshError is an interface used by errors returned during token refresh.
type TokenRefreshError interface {
error
@@ -390,7 +395,7 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
spt.refreshLock = &sync.RWMutex{}
}
if spt.sender == nil {
- spt.sender = &http.Client{Transport: tracing.Transport}
+ spt.sender = sender()
}
return nil
}
@@ -438,7 +443,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
RefreshWithin: defaultRefresh,
},
refreshLock: &sync.RWMutex{},
- sender: &http.Client{Transport: tracing.Transport},
+ sender: sender(),
refreshCallbacks: callbacks,
}
return spt, nil
@@ -679,7 +684,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
RefreshWithin: defaultRefresh,
},
refreshLock: &sync.RWMutex{},
- sender: &http.Client{Transport: tracing.Transport},
+ sender: sender(),
refreshCallbacks: callbacks,
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
}
@@ -983,3 +988,93 @@ func (spt *ServicePrincipalToken) Token() Token {
defer spt.refreshLock.RUnlock()
return spt.inner.Token
}
+
+// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
+type MultiTenantServicePrincipalToken struct {
+ PrimaryToken *ServicePrincipalToken
+ AuxiliaryTokens []*ServicePrincipalToken
+}
+
+// PrimaryOAuthToken returns the primary authorization token.
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
+ return mt.PrimaryToken.OAuthToken()
+}
+
+// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
+ tokens := make([]string, len(mt.AuxiliaryTokens))
+ for i := range mt.AuxiliaryTokens {
+ tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
+ }
+ return tokens
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
+func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+ if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.EnsureFreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+ if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.RefreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+ if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) {
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(secret, "secret"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ auxTenants := multiTenantCfg.AuxiliaryTenants()
+ m := MultiTenantServicePrincipalToken{
+ AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)),
+ }
+ primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err)
+ }
+ m.PrimaryToken = primary
+ for i := range auxTenants {
+ aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err)
+ }
+ m.AuxiliaryTokens[i] = aux
+ }
+ return &m, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
index 2e24b4b39..54e87b5b6 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
@@ -15,6 +15,7 @@ package autorest
// limitations under the License.
import (
+ "crypto/tls"
"encoding/base64"
"fmt"
"net/http"
@@ -22,7 +23,6 @@ import (
"strings"
"github.com/Azure/go-autorest/autorest/adal"
- "github.com/Azure/go-autorest/tracing"
)
const (
@@ -149,11 +149,11 @@ type BearerAuthorizerCallback struct {
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
// is invoked when the HTTP request is submitted.
-func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
- if sender == nil {
- sender = &http.Client{Transport: tracing.Transport}
+func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
+ if s == nil {
+ s = sender(tls.RenegotiateNever)
}
- return &BearerAuthorizerCallback{sender: sender, callback: callback}
+ return &BearerAuthorizerCallback{sender: s, callback: callback}
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
@@ -285,3 +285,52 @@ func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
}
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+ WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+ return &multiTenantSPTAuthorizer{tp: tp}
+}
+
+type multiTenantSPTAuthorizer struct {
+ tp adal.MultitenantOAuthTokenProvider
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err != nil {
+ return r, err
+ }
+ if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+ err = refresher.EnsureFreshWithContext(r.Context())
+ if err != nil {
+ var resp *http.Response
+ if tokError, ok := err.(adal.TokenRefreshError); ok {
+ resp = tokError.Response()
+ }
+ return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+ "Failed to refresh one or more Tokens for request to %s", r.URL)
+ }
+ }
+ r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+ if err != nil {
+ return r, err
+ }
+ auxTokens := mt.tp.AuxiliaryOAuthTokens()
+ for i := range auxTokens {
+ auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+ }
+ return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
+ })
+ }
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
index 02d011961..1cb41cbeb 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
@@ -417,6 +417,11 @@ func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest
}
req = req.WithContext(ctx)
+ preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...)
+ req, err = preparer.Prepare(req)
+ if err != nil {
+ return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request")
+ }
pt.resp, err = sender.Do(req)
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
index cdde41418..6c20b8179 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
@@ -45,6 +45,7 @@ type ResourceIdentifier struct {
Datalake string `json:"datalake"`
Batch string `json:"batch"`
OperationalInsights string `json:"operationalInsights"`
+ Storage string `json:"storage"`
}
// Environment represents a set of endpoints for each of Azure's Clouds.
@@ -103,6 +104,7 @@ var (
Datalake: "https://datalake.azure.net/",
Batch: "https://batch.core.windows.net/",
OperationalInsights: "https://api.loganalytics.io",
+ Storage: "https://storage.azure.com/",
},
}
@@ -135,6 +137,7 @@ var (
Datalake: NotAvailable,
Batch: "https://batch.core.usgovcloudapi.net/",
OperationalInsights: "https://api.loganalytics.us",
+ Storage: "https://storage.azure.com/",
},
}
@@ -167,6 +170,7 @@ var (
Datalake: NotAvailable,
Batch: "https://batch.chinacloudapi.cn/",
OperationalInsights: NotAvailable,
+ Storage: "https://storage.azure.com/",
},
}
@@ -199,6 +203,7 @@ var (
Datalake: NotAvailable,
Batch: "https://batch.cloudapi.de/",
OperationalInsights: NotAvailable,
+ Storage: "https://storage.azure.com/",
},
}
)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
index cfc7ed757..1c6a0617a 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/client.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -22,12 +22,10 @@ import (
"io/ioutil"
"log"
"net/http"
- "net/http/cookiejar"
"strings"
"time"
"github.com/Azure/go-autorest/logger"
- "github.com/Azure/go-autorest/tracing"
)
const (
@@ -73,6 +71,22 @@ type Response struct {
*http.Response `json:"-"`
}
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+ if r.Response == nil {
+ return false
+ }
+ return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+ return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
// LoggingInspector implements request and response inspectors that log the full request and
// response to a supplied log.
type LoggingInspector struct {
@@ -248,30 +262,8 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
// sender returns the Sender to which to send requests.
func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender {
if c.Sender == nil {
- // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
- var defaultTransport = http.DefaultTransport.(*http.Transport)
- transport := tracing.Transport
- // for non-default values of TLS renegotiation create a new tracing transport.
- // updating tracing.Transport affects all clients which is not what we want.
- if renengotiation != tls.RenegotiateNever {
- transport = tracing.NewTransport()
- }
- transport.Base = &http.Transport{
- Proxy: defaultTransport.Proxy,
- DialContext: defaultTransport.DialContext,
- MaxIdleConns: defaultTransport.MaxIdleConns,
- IdleConnTimeout: defaultTransport.IdleConnTimeout,
- TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
- ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
- TLSClientConfig: &tls.Config{
- MinVersion: tls.VersionTLS12,
- Renegotiation: renengotiation,
- },
- }
- j, _ := cookiejar.New(nil)
- return &http.Client{Jar: j, Transport: transport}
+ return sender(renengotiation)
}
-
return c.Sender
}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
similarity index 94%
rename from vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
rename to vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
index d64569567..b9d6a27ea 100644
--- a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
@@ -176,18 +176,7 @@
END OF TERMS AND CONDITIONS
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod
new file mode 100644
index 000000000..13a1e9803
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/autorest/date
+
+go 1.12
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod
new file mode 100644
index 000000000..ab2ae66ac
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest/autorest/adal v0.5.0
+ github.com/Azure/go-autorest/autorest/mocks v0.2.0
+ github.com/Azure/go-autorest/logger v0.1.0
+ github.com/Azure/go-autorest/tracing v0.5.0
+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum
new file mode 100644
index 000000000..729b99cd0
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum
@@ -0,0 +1,18 @@
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
index 6d67bd733..6e8ed64eb 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
@@ -16,7 +16,9 @@ package autorest
import (
"bytes"
+ "context"
"encoding/json"
+ "encoding/xml"
"fmt"
"io"
"io/ioutil"
@@ -31,11 +33,33 @@ const (
mimeTypeOctetStream = "application/octet-stream"
mimeTypeFormPost = "application/x-www-form-urlencoded"
- headerAuthorization = "Authorization"
- headerContentType = "Content-Type"
- headerUserAgent = "User-Agent"
+ headerAuthorization = "Authorization"
+ headerAuxAuthorization = "x-ms-authorization-auxiliary"
+ headerContentType = "Content-Type"
+ headerUserAgent = "User-Agent"
)
+// used as a key type in context.WithValue()
+type ctxPrepareDecorators struct{}
+
+// WithPrepareDecorators adds the specified PrepareDecorators to the provided context.
+// If no PrepareDecorators are provided the context is unchanged.
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context {
+ if len(prepareDecorator) == 0 {
+ return ctx
+ }
+ return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator)
+}
+
+// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators.
+func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
+ inCtx := ctx.Value(ctxPrepareDecorators{})
+ if pd, ok := inCtx.([]PrepareDecorator); ok {
+ return pd
+ }
+ return defaultPrepareDecorators
+}
+
// Preparer is the interface that wraps the Prepare method.
//
// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
@@ -190,6 +214,9 @@ func AsGet() PrepareDecorator { return WithMethod("GET") }
// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
func AsHead() PrepareDecorator { return WithMethod("HEAD") }
+// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE.
+func AsMerge() PrepareDecorator { return WithMethod("MERGE") }
+
// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
@@ -225,6 +252,25 @@ func WithBaseURL(baseURL string) PrepareDecorator {
}
}
+// WithBytes returns a PrepareDecorator that takes a list of bytes
+// which passes the bytes directly to the body
+func WithBytes(input *[]byte) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if input == nil {
+ return r, fmt.Errorf("Input Bytes was nil")
+ }
+
+ r.ContentLength = int64(len(*input))
+ r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+ }
+ return r, err
+ })
+ }
+}
+
// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
@@ -377,6 +423,28 @@ func WithJSON(v interface{}) PrepareDecorator {
}
}
+// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the
+// request and sets the Content-Length header.
+func WithXML(v interface{}) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ b, err := xml.Marshal(v)
+ if err == nil {
+ // we have to tack on an XML header
+ withHeader := xml.Header + string(b)
+ bytesWithHeader := []byte(withHeader)
+
+ r.ContentLength = int64(len(bytesWithHeader))
+ r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
+ }
+ }
+ return r, err
+ })
+ }
+}
+
// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
// is absolute (that is, it begins with a "/"), it replaces the existing path.
func WithPath(path string) PrepareDecorator {
@@ -455,7 +523,7 @@ func parseURL(u *url.URL, path string) (*url.URL, error) {
// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters
// given in the supplied map (i.e., key=value).
func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
- parameters := ensureValueStrings(queryParameters)
+ parameters := MapToValues(queryParameters)
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
@@ -463,14 +531,16 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato
if r.URL == nil {
return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
}
-
v := r.URL.Query()
for key, value := range parameters {
- d, err := url.QueryUnescape(value)
- if err != nil {
- return r, err
+ for i := range value {
+ d, err := url.QueryUnescape(value[i])
+ if err != nil {
+ return r, err
+ }
+ value[i] = d
}
- v.Add(key, d)
+ v[key] = value
}
r.URL.RawQuery = v.Encode()
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
index a908a0adb..349e1963a 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/responder.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -153,6 +153,25 @@ func ByClosingIfError() RespondDecorator {
}
}
+// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil {
+ bytes, errInner := ioutil.ReadAll(resp.Body)
+ if errInner != nil {
+ err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+ } else {
+ *v = bytes
+ }
+ }
+ return err
+ })
+ }
+}
+
// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
// response Body into the value pointed to by v.
func ByUnmarshallingJSON(v interface{}) RespondDecorator {
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
index 6665d7c00..5e595d7b1 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -15,16 +15,40 @@ package autorest
// limitations under the License.
import (
+ "context"
+ "crypto/tls"
"fmt"
"log"
"math"
"net/http"
+ "net/http/cookiejar"
"strconv"
"time"
"github.com/Azure/go-autorest/tracing"
)
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+ if len(sendDecorator) == 0 {
+ return ctx
+ }
+ return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+ inCtx := ctx.Value(ctxSendDecorators{})
+ if sd, ok := inCtx.([]SendDecorator); ok {
+ return sd
+ }
+ return defaultSendDecorators
+}
+
// Sender is the interface that wraps the Do method to send HTTP requests.
//
// The standard http.Client conforms to this interface.
@@ -47,7 +71,7 @@ type SendDecorator func(Sender) Sender
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
func CreateSender(decorators ...SendDecorator) Sender {
- return DecorateSender(&http.Client{}, decorators...)
+ return DecorateSender(sender(tls.RenegotiateNever), decorators...)
}
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
@@ -70,7 +94,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
//
// Send will not poll or retry requests.
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
- return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
+ return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
}
// SendWithSender sends the passed http.Request, through the provided Sender, returning the
@@ -82,6 +106,29 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht
return DecorateSender(s, decorators...).Do(r)
}
+func sender(renengotiation tls.RenegotiationSupport) Sender {
+ // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+ defaultTransport := http.DefaultTransport.(*http.Transport)
+ transport := &http.Transport{
+ Proxy: defaultTransport.Proxy,
+ DialContext: defaultTransport.DialContext,
+ MaxIdleConns: defaultTransport.MaxIdleConns,
+ IdleConnTimeout: defaultTransport.IdleConnTimeout,
+ TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
+ ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ Renegotiation: renengotiation,
+ },
+ }
+ var roundTripper http.RoundTripper = transport
+ if tracing.IsEnabled() {
+ roundTripper = tracing.NewTransport(transport)
+ }
+ j, _ := cookiejar.New(nil)
+ return &http.Client{Jar: j, Transport: roundTripper}
+}
+
// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
// invoking the Sender. The delay may be terminated by closing the optional channel on the
// http.Request. If canceled, no further Senders are invoked.
@@ -211,53 +258,73 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
// number of attempts, exponentially backing off between requests using the supplied backoff
-// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
-// the http.Request.
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
return func(s Sender) Sender {
- return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
- rr := NewRetriableRequest(r)
- // Increment to add the first call (attempts denotes number of retries)
- for attempt := 0; attempt < attempts+1; {
- err = rr.Prepare()
- if err != nil {
- return resp, err
- }
- resp, err = s.Do(rr.Request())
- // if the error isn't temporary don't bother retrying
- if err != nil && !IsTemporaryNetworkError(err) {
- return nil, err
- }
- // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
- // resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
- if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
- return resp, err
- }
- delayed := DelayWithRetryAfter(resp, r.Context().Done())
- if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
- return resp, r.Context().Err()
- }
- // don't count a 429 against the number of attempts
- // so that we continue to retry until it succeeds
- if resp == nil || resp.StatusCode != http.StatusTooManyRequests {
- attempt++
- }
- }
- return resp, err
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...)
})
}
}
-// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
-// responses with status code 429
+// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
+// specified number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
+// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request.
+func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...)
+ })
+ }
+}
+
+func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
+ rr := NewRetriableRequest(r)
+ // Increment to add the first call (attempts denotes number of retries)
+ for attempt := 0; attempt < attempts+1; {
+ err = rr.Prepare()
+ if err != nil {
+ return
+ }
+ resp, err = s.Do(rr.Request())
+ // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+ // resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+ if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
+ return resp, err
+ }
+ delayed := DelayWithRetryAfter(resp, r.Context().Done())
+ if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) {
+ return resp, r.Context().Err()
+ }
+ // when count429 == false don't count a 429 against the number
+ // of attempts so that we continue to retry until it succeeds
+ if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+ attempt++
+ }
+ }
+ return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration. If there is
+// no Retry-After header or the wait is cancelled the return value is false.
func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
if resp == nil {
return false
}
- retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
- if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
+ var dur time.Duration
+ ra := resp.Header.Get("Retry-After")
+ if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+ dur = time.Duration(retryAfter) * time.Second
+ } else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+ dur = t.Sub(time.Now())
+ }
+ if dur > 0 {
select {
- case <-time.After(time.Duration(retryAfter) * time.Second):
+ case <-time.After(dur):
return true
case <-cancel:
return false
@@ -317,8 +384,22 @@ func WithLogging(logger *log.Logger) SendDecorator {
// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
// count.
func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+ return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set
+// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
+ d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
+ if cap > 0 && d > cap {
+ d = cap
+ }
select {
- case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
+ case <-time.After(d):
return true
case <-cancel:
return false
diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go
index 0c8d9d224..7a71089c9 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/version.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/version.go
@@ -19,7 +19,7 @@ import (
"runtime"
)
-const number = "v12.0.0"
+const number = "v13.0.2"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod
new file mode 100644
index 000000000..f22ed56bc
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/logger
+
+go 1.12
diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod
new file mode 100644
index 000000000..25c34c108
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/tracing
+
+go 1.12
diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go
index 28951c284..0e7a6e962 100644
--- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go
+++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go
@@ -16,180 +16,52 @@ package tracing
import (
"context"
- "fmt"
"net/http"
- "os"
-
- "contrib.go.opencensus.io/exporter/ocagent"
- "go.opencensus.io/plugin/ochttp"
- "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/trace"
)
+// Tracer represents an HTTP tracing facility.
+type Tracer interface {
+ NewTransport(base *http.Transport) http.RoundTripper
+ StartSpan(ctx context.Context, name string) context.Context
+ EndSpan(ctx context.Context, httpStatusCode int, err error)
+}
+
var (
- // Transport is the default tracing RoundTripper. The custom options setter will control
- // if traces are being emitted or not.
- Transport = NewTransport()
-
- // enabled is the flag for marking if tracing is enabled.
- enabled = false
-
- // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise
- // it will be using the parent sampler or the default.
- sampler = trace.NeverSample()
-
- // Views for metric instrumentation.
- views = map[string]*view.View{}
-
- // the trace exporter
- traceExporter trace.Exporter
+ tracer Tracer
)
-func init() {
- enableFromEnv()
+// Register will register the provided Tracer. Pass nil to unregister a Tracer.
+func Register(t Tracer) {
+ tracer = t
}
-func enableFromEnv() {
- _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED")
- _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD")
- if ok || legacyOk {
- agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT")
-
- if ok {
- EnableWithAIForwarding(agentEndpoint)
- } else {
- Enable()
- }
- }
-}
-
-// NewTransport returns a new instance of a tracing-aware RoundTripper.
-func NewTransport() *ochttp.Transport {
- return &ochttp.Transport{
- Propagation: &tracecontext.HTTPFormat{},
- GetStartOptions: getStartOptions,
- }
-}
-
-// IsEnabled returns true if monitoring is enabled for the sdk.
+// IsEnabled returns true if a Tracer has been registered.
func IsEnabled() bool {
- return enabled
+ return tracer != nil
}
-// Enable will start instrumentation for metrics and traces.
-func Enable() error {
- enabled = true
- sampler = nil
-
- err := initStats()
- return err
+// NewTransport creates a new instrumenting http.RoundTripper for the
+// registered Tracer. If no Tracer has been registered it returns nil.
+func NewTransport(base *http.Transport) http.RoundTripper {
+ if tracer != nil {
+ return tracer.NewTransport(base)
+ }
+ return nil
}
-// Disable will disable instrumentation for metrics and traces.
-func Disable() {
- disableStats()
- sampler = trace.NeverSample()
- if traceExporter != nil {
- trace.UnregisterExporter(traceExporter)
- }
- enabled = false
-}
-
-// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder
-// exporter making the metrics and traces available in app insights.
-func EnableWithAIForwarding(agentEndpoint string) (err error) {
- err = Enable()
- if err != nil {
- return err
- }
-
- traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
- if err != nil {
- return err
- }
- trace.RegisterExporter(traceExporter)
- return
-}
-
-// getStartOptions is the custom options setter for the ochttp package.
-func getStartOptions(*http.Request) trace.StartOptions {
- return trace.StartOptions{
- Sampler: sampler,
- }
-}
-
-// initStats registers the views for the http metrics
-func initStats() (err error) {
- clientViews := []*view.View{
- ochttp.ClientCompletedCount,
- ochttp.ClientRoundtripLatencyDistribution,
- ochttp.ClientReceivedBytesDistribution,
- ochttp.ClientSentBytesDistribution,
- }
- for _, cv := range clientViews {
- vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
- views[vn] = cv.WithName(vn)
- err = view.Register(views[vn])
- if err != nil {
- return err
- }
- }
- return
-}
-
-// disableStats will unregister the previously registered metrics
-func disableStats() {
- for _, v := range views {
- view.Unregister(v)
- }
-}
-
-// StartSpan starts a trace span
+// StartSpan starts a trace span with the specified name, associating it with the
+// provided context. Has no effect if a Tracer has not been registered.
func StartSpan(ctx context.Context, name string) context.Context {
- ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
+ if tracer != nil {
+ return tracer.StartSpan(ctx, name)
+ }
return ctx
}
-// EndSpan ends a previously started span stored in the context
+// EndSpan ends a previously started span stored in the context.
+// Has no effect if a Tracer has not been registered.
func EndSpan(ctx context.Context, httpStatusCode int, err error) {
- span := trace.FromContext(ctx)
-
- if span == nil {
- return
- }
-
- if err != nil {
- span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
- }
- span.End()
-}
-
-// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
-// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
-func toTraceStatusCode(httpStatusCode int) int32 {
- switch {
- case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
- return trace.StatusCodeOK
- case httpStatusCode == http.StatusBadRequest:
- return trace.StatusCodeInvalidArgument
- case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
- return trace.StatusCodeUnauthenticated
- case httpStatusCode == http.StatusForbidden:
- return trace.StatusCodePermissionDenied
- case httpStatusCode == http.StatusNotFound:
- return trace.StatusCodeNotFound
- case httpStatusCode == http.StatusTooManyRequests:
- return trace.StatusCodeResourceExhausted
- case httpStatusCode == 499:
- return trace.StatusCodeCancelled
- case httpStatusCode == http.StatusNotImplemented:
- return trace.StatusCodeUnimplemented
- case httpStatusCode == http.StatusServiceUnavailable:
- return trace.StatusCodeUnavailable
- case httpStatusCode == http.StatusGatewayTimeout:
- return trace.StatusCodeDeadlineExceeded
- default:
- return trace.StatusCodeUnknown
+ if tracer != nil {
+ tracer.EndSpan(ctx, httpStatusCode, err)
}
}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
deleted file mode 100644
index e068e731e..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
deleted file mode 100644
index 12b578d06..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/agent/common/v1/common.proto
-
-package v1
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type LibraryInfo_Language int32
-
-const (
- LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
- LibraryInfo_CPP LibraryInfo_Language = 1
- LibraryInfo_C_SHARP LibraryInfo_Language = 2
- LibraryInfo_ERLANG LibraryInfo_Language = 3
- LibraryInfo_GO_LANG LibraryInfo_Language = 4
- LibraryInfo_JAVA LibraryInfo_Language = 5
- LibraryInfo_NODE_JS LibraryInfo_Language = 6
- LibraryInfo_PHP LibraryInfo_Language = 7
- LibraryInfo_PYTHON LibraryInfo_Language = 8
- LibraryInfo_RUBY LibraryInfo_Language = 9
-)
-
-var LibraryInfo_Language_name = map[int32]string{
- 0: "LANGUAGE_UNSPECIFIED",
- 1: "CPP",
- 2: "C_SHARP",
- 3: "ERLANG",
- 4: "GO_LANG",
- 5: "JAVA",
- 6: "NODE_JS",
- 7: "PHP",
- 8: "PYTHON",
- 9: "RUBY",
-}
-
-var LibraryInfo_Language_value = map[string]int32{
- "LANGUAGE_UNSPECIFIED": 0,
- "CPP": 1,
- "C_SHARP": 2,
- "ERLANG": 3,
- "GO_LANG": 4,
- "JAVA": 5,
- "NODE_JS": 6,
- "PHP": 7,
- "PYTHON": 8,
- "RUBY": 9,
-}
-
-func (x LibraryInfo_Language) String() string {
- return proto.EnumName(LibraryInfo_Language_name, int32(x))
-}
-
-func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{2, 0}
-}
-
-// Identifier metadata of the Node that produces the span or tracing data.
-// Note, this is not the metadata about the Node or service that is described by associated spans.
-// In the future we plan to extend the identifier proto definition to support
-// additional information (e.g cloud id, etc.)
-type Node struct {
- // Identifier that uniquely identifies a process within a VM/container.
- Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
- // Information on the OpenCensus Library that initiates the stream.
- LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
- // Additional information on service.
- ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
- // Additional attributes.
- Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Node) Reset() { *m = Node{} }
-func (m *Node) String() string { return proto.CompactTextString(m) }
-func (*Node) ProtoMessage() {}
-func (*Node) Descriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{0}
-}
-
-func (m *Node) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Node.Unmarshal(m, b)
-}
-func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Node.Marshal(b, m, deterministic)
-}
-func (m *Node) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Node.Merge(m, src)
-}
-func (m *Node) XXX_Size() int {
- return xxx_messageInfo_Node.Size(m)
-}
-func (m *Node) XXX_DiscardUnknown() {
- xxx_messageInfo_Node.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Node proto.InternalMessageInfo
-
-func (m *Node) GetIdentifier() *ProcessIdentifier {
- if m != nil {
- return m.Identifier
- }
- return nil
-}
-
-func (m *Node) GetLibraryInfo() *LibraryInfo {
- if m != nil {
- return m.LibraryInfo
- }
- return nil
-}
-
-func (m *Node) GetServiceInfo() *ServiceInfo {
- if m != nil {
- return m.ServiceInfo
- }
- return nil
-}
-
-func (m *Node) GetAttributes() map[string]string {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-// Identifier that uniquely identifies a process within a VM/container.
-type ProcessIdentifier struct {
- // The host name. Usually refers to the machine/container name.
- // For example: os.Hostname() in Go, socket.gethostname() in Python.
- HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
- // Process id.
- Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
- // Start time of this ProcessIdentifier. Represented in epoch time.
- StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} }
-func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) }
-func (*ProcessIdentifier) ProtoMessage() {}
-func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{1}
-}
-
-func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b)
-}
-func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic)
-}
-func (m *ProcessIdentifier) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProcessIdentifier.Merge(m, src)
-}
-func (m *ProcessIdentifier) XXX_Size() int {
- return xxx_messageInfo_ProcessIdentifier.Size(m)
-}
-func (m *ProcessIdentifier) XXX_DiscardUnknown() {
- xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo
-
-func (m *ProcessIdentifier) GetHostName() string {
- if m != nil {
- return m.HostName
- }
- return ""
-}
-
-func (m *ProcessIdentifier) GetPid() uint32 {
- if m != nil {
- return m.Pid
- }
- return 0
-}
-
-func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.StartTimestamp
- }
- return nil
-}
-
-// Information on OpenCensus Library.
-type LibraryInfo struct {
- // Language of OpenCensus Library.
- Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
- // Version of Agent exporter of Library.
- ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
- // Version of OpenCensus Library.
- CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LibraryInfo) Reset() { *m = LibraryInfo{} }
-func (m *LibraryInfo) String() string { return proto.CompactTextString(m) }
-func (*LibraryInfo) ProtoMessage() {}
-func (*LibraryInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{2}
-}
-
-func (m *LibraryInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LibraryInfo.Unmarshal(m, b)
-}
-func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic)
-}
-func (m *LibraryInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LibraryInfo.Merge(m, src)
-}
-func (m *LibraryInfo) XXX_Size() int {
- return xxx_messageInfo_LibraryInfo.Size(m)
-}
-func (m *LibraryInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_LibraryInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo
-
-func (m *LibraryInfo) GetLanguage() LibraryInfo_Language {
- if m != nil {
- return m.Language
- }
- return LibraryInfo_LANGUAGE_UNSPECIFIED
-}
-
-func (m *LibraryInfo) GetExporterVersion() string {
- if m != nil {
- return m.ExporterVersion
- }
- return ""
-}
-
-func (m *LibraryInfo) GetCoreLibraryVersion() string {
- if m != nil {
- return m.CoreLibraryVersion
- }
- return ""
-}
-
-// Additional service information.
-type ServiceInfo struct {
- // Name of the service.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ServiceInfo) Reset() { *m = ServiceInfo{} }
-func (m *ServiceInfo) String() string { return proto.CompactTextString(m) }
-func (*ServiceInfo) ProtoMessage() {}
-func (*ServiceInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{3}
-}
-
-func (m *ServiceInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServiceInfo.Unmarshal(m, b)
-}
-func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic)
-}
-func (m *ServiceInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceInfo.Merge(m, src)
-}
-func (m *ServiceInfo) XXX_Size() int {
- return xxx_messageInfo_ServiceInfo.Size(m)
-}
-func (m *ServiceInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo
-
-func (m *ServiceInfo) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func init() {
- proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value)
- proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node")
- proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry")
- proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier")
- proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo")
- proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84)
-}
-
-var fileDescriptor_126c72ed8a252c84 = []byte{
- // 590 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e,
- 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee,
- 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01,
- 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde,
- 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9,
- 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2,
- 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89,
- 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9,
- 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7,
- 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39,
- 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b,
- 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b,
- 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13,
- 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06,
- 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d,
- 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67,
- 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2,
- 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a,
- 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a,
- 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76,
- 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23,
- 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c,
- 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c,
- 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92,
- 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e,
- 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51,
- 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14,
- 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83,
- 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0,
- 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86,
- 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4,
- 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd,
- 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9,
- 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c,
- 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70,
- 0x85, 0xec, 0xd8, 0x9f, 0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1,
- 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00,
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
deleted file mode 100644
index 801212d92..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
- v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
- v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
- proto "github.com/golang/protobuf/proto"
- grpc "google.golang.org/grpc"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportMetricsServiceRequest struct {
- // This is required only in the first message on the stream or if the
- // previous sent ExportMetricsServiceRequest message has a different Node (e.g.
- // when the same RPC is used to send Metrics from multiple Applications).
- Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
- // A list of metrics that belong to the last received Node.
- Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
- // The resource for the metrics in this message that do not have an explicit
- // resource set.
- // If unset, the most recently set resource in the RPC stream applies. It is
- // valid to never be set within a stream, e.g. when no resource info is known
- // at all or when all sent metrics have an explicit resource set.
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} }
-func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsServiceRequest) ProtoMessage() {}
-func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_47e253a956287d04, []int{0}
-}
-
-func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b)
-}
-func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic)
-}
-func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src)
-}
-func (m *ExportMetricsServiceRequest) XXX_Size() int {
- return xxx_messageInfo_ExportMetricsServiceRequest.Size(m)
-}
-func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo
-
-func (m *ExportMetricsServiceRequest) GetNode() *v1.Node {
- if m != nil {
- return m.Node
- }
- return nil
-}
-
-func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric {
- if m != nil {
- return m.Metrics
- }
- return nil
-}
-
-func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource {
- if m != nil {
- return m.Resource
- }
- return nil
-}
-
-type ExportMetricsServiceResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} }
-func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsServiceResponse) ProtoMessage() {}
-func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_47e253a956287d04, []int{1}
-}
-
-func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b)
-}
-func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic)
-}
-func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src)
-}
-func (m *ExportMetricsServiceResponse) XXX_Size() int {
- return xxx_messageInfo_ExportMetricsServiceResponse.Size(m)
-}
-func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest")
- proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04)
-}
-
-var fileDescriptor_47e253a956287d04 = []byte{
- // 340 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x4a, 0xf3, 0x40,
- 0x14, 0x85, 0xff, 0xf9, 0x2b, 0x55, 0xa6, 0xe0, 0x62, 0xdc, 0x94, 0x2a, 0x52, 0xab, 0x48, 0x45,
- 0x32, 0x63, 0xea, 0x42, 0x10, 0x54, 0x28, 0xb8, 0x11, 0x94, 0x12, 0x77, 0x6e, 0xa4, 0x4d, 0x2f,
- 0x71, 0x16, 0x99, 0x1b, 0x67, 0x26, 0xc1, 0x57, 0x70, 0xe5, 0x3b, 0xf8, 0x5c, 0x3e, 0x8c, 0x24,
- 0x93, 0xb4, 0x94, 0x18, 0x0b, 0xee, 0x2e, 0x99, 0xf3, 0x9d, 0x9c, 0x33, 0x73, 0xe9, 0x05, 0x26,
- 0xa0, 0x42, 0x50, 0x26, 0x35, 0x22, 0xd1, 0x68, 0x51, 0x4c, 0x23, 0x50, 0x56, 0xc4, 0x60, 0xb5,
- 0x0c, 0x8d, 0xc8, 0xfc, 0x6a, 0x7c, 0x36, 0xa0, 0x33, 0x19, 0x02, 0x2f, 0x64, 0xec, 0x60, 0x09,
- 0xba, 0x2f, 0xbc, 0x00, 0x79, 0xa9, 0xe6, 0x99, 0xdf, 0xf3, 0x1a, 0xbc, 0x43, 0x8c, 0x63, 0x54,
- 0xb9, 0xb5, 0x9b, 0x1c, 0xdf, 0x3b, 0xa9, 0xc9, 0xeb, 0x21, 0x4a, 0xe9, 0x69, 0x4d, 0xaa, 0xc1,
- 0x60, 0xaa, 0x43, 0xc8, 0xb5, 0xd5, 0xec, 0xc4, 0x83, 0x2f, 0x42, 0x77, 0x6f, 0xdf, 0x12, 0xd4,
- 0xf6, 0xde, 0x99, 0x3c, 0xba, 0x22, 0x01, 0xbc, 0xa6, 0x60, 0x2c, 0xbb, 0xa4, 0x1b, 0x0a, 0xe7,
- 0xd0, 0x25, 0x7d, 0x32, 0xec, 0x8c, 0x8e, 0x79, 0x43, 0xb1, 0x32, 0x6b, 0xe6, 0xf3, 0x07, 0x9c,
- 0x43, 0x50, 0x30, 0xec, 0x8a, 0x6e, 0x96, 0xc9, 0xba, 0xff, 0xfb, 0xad, 0x61, 0x67, 0x74, 0x58,
- 0xc7, 0x97, 0x37, 0xc2, 0x5d, 0x80, 0xa0, 0x62, 0xd8, 0x98, 0x6e, 0x55, 0x61, 0xbb, 0xad, 0xa6,
- 0xdf, 0x2f, 0xea, 0x64, 0x3e, 0x0f, 0xca, 0x39, 0x58, 0x70, 0x83, 0x7d, 0xba, 0xf7, 0x73, 0x3b,
- 0x93, 0xa0, 0x32, 0x30, 0xfa, 0x24, 0x74, 0x7b, 0xf5, 0x88, 0x7d, 0x10, 0xda, 0x76, 0x0c, 0xbb,
- 0xe6, 0x6b, 0xdf, 0x91, 0xff, 0x72, 0x79, 0xbd, 0x9b, 0x3f, 0xf3, 0x2e, 0xde, 0xe0, 0xdf, 0x90,
- 0x9c, 0x91, 0xf1, 0x3b, 0xa1, 0x47, 0x12, 0xd7, 0x7b, 0x8d, 0x77, 0x56, 0x6d, 0x26, 0xb9, 0x6a,
- 0x42, 0x9e, 0xee, 0x22, 0x69, 0x5f, 0xd2, 0x59, 0xfe, 0x48, 0xc2, 0x19, 0x78, 0x52, 0x19, 0xab,
- 0xd3, 0x18, 0x94, 0x9d, 0x5a, 0x89, 0x4a, 0x2c, 0xbd, 0x3d, 0xb7, 0x32, 0x11, 0x28, 0x2f, 0xaa,
- 0xef, 0xfb, 0xac, 0x5d, 0x1c, 0x9f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x61, 0x3b, 0xc3,
- 0x1b, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// MetricsServiceClient is the client API for MetricsService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MetricsServiceClient interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
- Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error)
-}
-
-type metricsServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
- return &metricsServiceClient{cc}
-}
-
-func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) {
- stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...)
- if err != nil {
- return nil, err
- }
- x := &metricsServiceExportClient{stream}
- return x, nil
-}
-
-type MetricsService_ExportClient interface {
- Send(*ExportMetricsServiceRequest) error
- Recv() (*ExportMetricsServiceResponse, error)
- grpc.ClientStream
-}
-
-type metricsServiceExportClient struct {
- grpc.ClientStream
-}
-
-func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) {
- m := new(ExportMetricsServiceResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// MetricsServiceServer is the server API for MetricsService service.
-type MetricsServiceServer interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
- Export(MetricsService_ExportServer) error
-}
-
-func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
- s.RegisterService(&_MetricsService_serviceDesc, srv)
-}
-
-func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream})
-}
-
-type MetricsService_ExportServer interface {
- Send(*ExportMetricsServiceResponse) error
- Recv() (*ExportMetricsServiceRequest, error)
- grpc.ServerStream
-}
-
-type metricsServiceExportServer struct {
- grpc.ServerStream
-}
-
-func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) {
- m := new(ExportMetricsServiceRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-var _MetricsService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService",
- HandlerType: (*MetricsServiceServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Export",
- Handler: _MetricsService_Export_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto",
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go
deleted file mode 100644
index e7c49a387..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go
+++ /dev/null
@@ -1,443 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/agent/trace/v1/trace_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
- v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
- v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
- proto "github.com/golang/protobuf/proto"
- grpc "google.golang.org/grpc"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type CurrentLibraryConfig struct {
- // This is required only in the first message on the stream or if the
- // previous sent CurrentLibraryConfig message has a different Node (e.g.
- // when the same RPC is used to configure multiple Applications).
- Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
- // Current configuration.
- Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} }
-func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) }
-func (*CurrentLibraryConfig) ProtoMessage() {}
-func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_7027f99caf7ac6a5, []int{0}
-}
-
-func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b)
-}
-func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic)
-}
-func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CurrentLibraryConfig.Merge(m, src)
-}
-func (m *CurrentLibraryConfig) XXX_Size() int {
- return xxx_messageInfo_CurrentLibraryConfig.Size(m)
-}
-func (m *CurrentLibraryConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo
-
-func (m *CurrentLibraryConfig) GetNode() *v1.Node {
- if m != nil {
- return m.Node
- }
- return nil
-}
-
-func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig {
- if m != nil {
- return m.Config
- }
- return nil
-}
-
-type UpdatedLibraryConfig struct {
- // This field is ignored when the RPC is used to configure only one Application.
- // This is required only in the first message on the stream or if the
- // previous sent UpdatedLibraryConfig message has a different Node.
- Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
- // Requested updated configuration.
- Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} }
-func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) }
-func (*UpdatedLibraryConfig) ProtoMessage() {}
-func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_7027f99caf7ac6a5, []int{1}
-}
-
-func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b)
-}
-func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic)
-}
-func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src)
-}
-func (m *UpdatedLibraryConfig) XXX_Size() int {
- return xxx_messageInfo_UpdatedLibraryConfig.Size(m)
-}
-func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo
-
-func (m *UpdatedLibraryConfig) GetNode() *v1.Node {
- if m != nil {
- return m.Node
- }
- return nil
-}
-
-func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig {
- if m != nil {
- return m.Config
- }
- return nil
-}
-
-type ExportTraceServiceRequest struct {
- // This is required only in the first message on the stream or if the
- // previous sent ExportTraceServiceRequest message has a different Node (e.g.
- // when the same RPC is used to send Spans from multiple Applications).
- Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
- // A list of Spans that belong to the last received Node.
- Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
- // The resource for the spans in this message that do not have an explicit
- // resource set.
- // If unset, the most recently set resource in the RPC stream applies. It is
- // valid to never be set within a stream, e.g. when no resource info is known.
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }
-func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportTraceServiceRequest) ProtoMessage() {}
-func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_7027f99caf7ac6a5, []int{2}
-}
-
-func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b)
-}
-func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
-}
-func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
-}
-func (m *ExportTraceServiceRequest) XXX_Size() int {
- return xxx_messageInfo_ExportTraceServiceRequest.Size(m)
-}
-func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo
-
-func (m *ExportTraceServiceRequest) GetNode() *v1.Node {
- if m != nil {
- return m.Node
- }
- return nil
-}
-
-func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span {
- if m != nil {
- return m.Spans
- }
- return nil
-}
-
-func (m *ExportTraceServiceRequest) GetResource() *v12.Resource {
- if m != nil {
- return m.Resource
- }
- return nil
-}
-
-type ExportTraceServiceResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }
-func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportTraceServiceResponse) ProtoMessage() {}
-func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7027f99caf7ac6a5, []int{3}
-}
-
-func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b)
-}
-func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
-}
-func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
-}
-func (m *ExportTraceServiceResponse) XXX_Size() int {
- return xxx_messageInfo_ExportTraceServiceResponse.Size(m)
-}
-func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig")
- proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig")
- proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest")
- proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", fileDescriptor_7027f99caf7ac6a5)
-}
-
-var fileDescriptor_7027f99caf7ac6a5 = []byte{
- // 423 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40,
- 0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a,
- 0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8,
- 0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3,
- 0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d,
- 0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49,
- 0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91,
- 0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d,
- 0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7,
- 0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf,
- 0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73,
- 0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1,
- 0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31,
- 0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95,
- 0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d,
- 0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90,
- 0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d,
- 0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d,
- 0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89,
- 0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44,
- 0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e,
- 0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae,
- 0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59,
- 0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94,
- 0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00,
- 0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf,
- 0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// TraceServiceClient is the client API for TraceService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type TraceServiceClient interface {
- // After initialization, this RPC must be kept alive for the entire life of
- // the application. The agent pushes configs down to applications via a
- // stream.
- Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
- Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
-}
-
-type traceServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
- return &traceServiceClient{cc}
-}
-
-func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
- stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
- if err != nil {
- return nil, err
- }
- x := &traceServiceConfigClient{stream}
- return x, nil
-}
-
-type TraceService_ConfigClient interface {
- Send(*CurrentLibraryConfig) error
- Recv() (*UpdatedLibraryConfig, error)
- grpc.ClientStream
-}
-
-type traceServiceConfigClient struct {
- grpc.ClientStream
-}
-
-func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
- m := new(UpdatedLibraryConfig)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
- stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
- if err != nil {
- return nil, err
- }
- x := &traceServiceExportClient{stream}
- return x, nil
-}
-
-type TraceService_ExportClient interface {
- Send(*ExportTraceServiceRequest) error
- Recv() (*ExportTraceServiceResponse, error)
- grpc.ClientStream
-}
-
-type traceServiceExportClient struct {
- grpc.ClientStream
-}
-
-func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
- m := new(ExportTraceServiceResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// TraceServiceServer is the server API for TraceService service.
-type TraceServiceServer interface {
- // After initialization, this RPC must be kept alive for the entire life of
- // the application. The agent pushes configs down to applications via a
- // stream.
- Config(TraceService_ConfigServer) error
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
- Export(TraceService_ExportServer) error
-}
-
-func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
- s.RegisterService(&_TraceService_serviceDesc, srv)
-}
-
-func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
-}
-
-type TraceService_ConfigServer interface {
- Send(*UpdatedLibraryConfig) error
- Recv() (*CurrentLibraryConfig, error)
- grpc.ServerStream
-}
-
-type traceServiceConfigServer struct {
- grpc.ServerStream
-}
-
-func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
- m := new(CurrentLibraryConfig)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
-}
-
-type TraceService_ExportServer interface {
- Send(*ExportTraceServiceResponse) error
- Recv() (*ExportTraceServiceRequest, error)
- grpc.ServerStream
-}
-
-type traceServiceExportServer struct {
- grpc.ServerStream
-}
-
-func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
- m := new(ExportTraceServiceRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-var _TraceService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
- HandlerType: (*TraceServiceServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Config",
- Handler: _TraceService_Config_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "Export",
- Handler: _TraceService_Export_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go
deleted file mode 100644
index bd4b8a827..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: opencensus/proto/agent/trace/v1/trace_service.proto
-
-/*
-Package v1 is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package v1
-
-import (
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) {
- var metadata runtime.ServerMetadata
- stream, err := client.Export(ctx)
- if err != nil {
- grpclog.Infof("Failed to start streaming: %v", err)
- return nil, metadata, err
- }
- newReader, berr := utilities.IOReaderFactory(req.Body)
- if berr != nil {
- return nil, metadata, berr
- }
- dec := marshaler.NewDecoder(newReader())
- handleSend := func() error {
- var protoReq ExportTraceServiceRequest
- err := dec.Decode(&protoReq)
- if err == io.EOF {
- return err
- }
- if err != nil {
- grpclog.Infof("Failed to decode request: %v", err)
- return err
- }
- if err := stream.Send(&protoReq); err != nil {
- grpclog.Infof("Failed to send request: %v", err)
- return err
- }
- return nil
- }
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Infof("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
- go func() {
- for {
- if err := handleSend(); err != nil {
- break
- }
- }
- if err := stream.CloseSend(); err != nil {
- grpclog.Infof("Failed to terminate client stream: %v", err)
- }
- }()
- header, err := stream.Header()
- if err != nil {
- grpclog.Infof("Failed to get header from client: %v", err)
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-}
-
-// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterTraceServiceHandler(ctx, mux, conn)
-}
-
-// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
-}
-
-// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
-// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "TraceServiceClient" to call the correct interceptors.
-func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
-
- mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
-)
-
-var (
- forward_TraceService_Export_0 = runtime.ForwardResponseStream
-)
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
deleted file mode 100644
index 53b8aa99e..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
+++ /dev/null
@@ -1,1126 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/metrics/v1/metrics.proto
-
-package v1
-
-import (
- fmt "fmt"
- v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- wrappers "github.com/golang/protobuf/ptypes/wrappers"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// The kind of metric. It describes how the data is reported.
-//
-// A gauge is an instantaneous measurement of a value.
-//
-// A cumulative measurement is a value accumulated over a time interval. In
-// a time series, cumulative measurements should have the same start time,
-// increasing values and increasing end times, until an event resets the
-// cumulative value to zero and sets a new start time for the following
-// points.
-type MetricDescriptor_Type int32
-
-const (
- // Do not use this default value.
- MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0
- // Integer gauge. The value can go both up and down.
- MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1
- // Floating point gauge. The value can go both up and down.
- MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2
- // Distribution gauge measurement. The count and sum can go both up and
- // down. Recorded values are always >= 0.
- // Used in scenarios like a snapshot of time the current items in a queue
- // have spent there.
- MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3
- // Integer cumulative measurement. The value cannot decrease, if resets
- // then the start_time should also be reset.
- MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4
- // Floating point cumulative measurement. The value cannot decrease, if
- // resets then the start_time should also be reset. Recorded values are
- // always >= 0.
- MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5
- // Distribution cumulative measurement. The count and sum cannot decrease,
- // if resets then the start_time should also be reset.
- MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6
- // Some frameworks implemented Histograms as a summary of observations
- // (usually things like request durations and response sizes). While it
- // also provides a total count of observations and a sum of all observed
- // values, it calculates configurable percentiles over a sliding time
- // window. This is not recommended, since it cannot be aggregated.
- MetricDescriptor_SUMMARY MetricDescriptor_Type = 7
-)
-
-var MetricDescriptor_Type_name = map[int32]string{
- 0: "UNSPECIFIED",
- 1: "GAUGE_INT64",
- 2: "GAUGE_DOUBLE",
- 3: "GAUGE_DISTRIBUTION",
- 4: "CUMULATIVE_INT64",
- 5: "CUMULATIVE_DOUBLE",
- 6: "CUMULATIVE_DISTRIBUTION",
- 7: "SUMMARY",
-}
-
-var MetricDescriptor_Type_value = map[string]int32{
- "UNSPECIFIED": 0,
- "GAUGE_INT64": 1,
- "GAUGE_DOUBLE": 2,
- "GAUGE_DISTRIBUTION": 3,
- "CUMULATIVE_INT64": 4,
- "CUMULATIVE_DOUBLE": 5,
- "CUMULATIVE_DISTRIBUTION": 6,
- "SUMMARY": 7,
-}
-
-func (x MetricDescriptor_Type) String() string {
- return proto.EnumName(MetricDescriptor_Type_name, int32(x))
-}
-
-func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{1, 0}
-}
-
-// Defines a Metric which has one or more timeseries.
-type Metric struct {
- // The descriptor of the Metric.
- // TODO(issue #152): consider only sending the name of descriptor for
- // optimization.
- MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
- // One or more timeseries for a single metric, where each timeseries has
- // one or more points.
- Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
- // The resource for the metric. If unset, it may be set to a default value
- // provided for a sequence of messages in an RPC stream.
- Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{0}
-}
-
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
-}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metric proto.InternalMessageInfo
-
-func (m *Metric) GetMetricDescriptor() *MetricDescriptor {
- if m != nil {
- return m.MetricDescriptor
- }
- return nil
-}
-
-func (m *Metric) GetTimeseries() []*TimeSeries {
- if m != nil {
- return m.Timeseries
- }
- return nil
-}
-
-func (m *Metric) GetResource() *v1.Resource {
- if m != nil {
- return m.Resource
- }
- return nil
-}
-
-// Defines a metric type and its schema.
-type MetricDescriptor struct {
- // The metric type, including its DNS name prefix. It must be unique.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // A detailed description of the metric, which can be used in documentation.
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- // The unit in which the metric value is reported. Follows the format
- // described by http://unitsofmeasure.org/ucum.html.
- Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
- Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"`
- // The label keys associated with the metric descriptor.
- LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} }
-func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) }
-func (*MetricDescriptor) ProtoMessage() {}
-func (*MetricDescriptor) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{1}
-}
-
-func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b)
-}
-func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic)
-}
-func (m *MetricDescriptor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricDescriptor.Merge(m, src)
-}
-func (m *MetricDescriptor) XXX_Size() int {
- return xxx_messageInfo_MetricDescriptor.Size(m)
-}
-func (m *MetricDescriptor) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricDescriptor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo
-
-func (m *MetricDescriptor) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *MetricDescriptor) GetDescription() string {
- if m != nil {
- return m.Description
- }
- return ""
-}
-
-func (m *MetricDescriptor) GetUnit() string {
- if m != nil {
- return m.Unit
- }
- return ""
-}
-
-func (m *MetricDescriptor) GetType() MetricDescriptor_Type {
- if m != nil {
- return m.Type
- }
- return MetricDescriptor_UNSPECIFIED
-}
-
-func (m *MetricDescriptor) GetLabelKeys() []*LabelKey {
- if m != nil {
- return m.LabelKeys
- }
- return nil
-}
-
-// Defines a label key associated with a metric descriptor.
-type LabelKey struct {
- // The key for the label.
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // A human-readable description of what this label key represents.
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LabelKey) Reset() { *m = LabelKey{} }
-func (m *LabelKey) String() string { return proto.CompactTextString(m) }
-func (*LabelKey) ProtoMessage() {}
-func (*LabelKey) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{2}
-}
-
-func (m *LabelKey) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelKey.Unmarshal(m, b)
-}
-func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic)
-}
-func (m *LabelKey) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelKey.Merge(m, src)
-}
-func (m *LabelKey) XXX_Size() int {
- return xxx_messageInfo_LabelKey.Size(m)
-}
-func (m *LabelKey) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelKey.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelKey proto.InternalMessageInfo
-
-func (m *LabelKey) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *LabelKey) GetDescription() string {
- if m != nil {
- return m.Description
- }
- return ""
-}
-
-// A collection of data points that describes the time-varying values
-// of a metric.
-type TimeSeries struct {
- // Must be present for cumulative metrics. The time when the cumulative value
- // was reset to zero. Exclusive. The cumulative value is over the time interval
- // (start_timestamp, timestamp]. If not specified, the backend can use the
- // previous recorded value.
- StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
- // The set of label values that uniquely identify this timeseries. Applies to
- // all points. The order of label values must match that of label keys in the
- // metric descriptor.
- LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
- // The data points of this timeseries. Point.value type MUST match the
- // MetricDescriptor.type.
- Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TimeSeries) Reset() { *m = TimeSeries{} }
-func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
-func (*TimeSeries) ProtoMessage() {}
-func (*TimeSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{3}
-}
-
-func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TimeSeries.Unmarshal(m, b)
-}
-func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
-}
-func (m *TimeSeries) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TimeSeries.Merge(m, src)
-}
-func (m *TimeSeries) XXX_Size() int {
- return xxx_messageInfo_TimeSeries.Size(m)
-}
-func (m *TimeSeries) XXX_DiscardUnknown() {
- xxx_messageInfo_TimeSeries.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
-
-func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.StartTimestamp
- }
- return nil
-}
-
-func (m *TimeSeries) GetLabelValues() []*LabelValue {
- if m != nil {
- return m.LabelValues
- }
- return nil
-}
-
-func (m *TimeSeries) GetPoints() []*Point {
- if m != nil {
- return m.Points
- }
- return nil
-}
-
-type LabelValue struct {
- // The value for the label.
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- // If false the value field is ignored and considered not set.
- // This is used to differentiate a missing label from an empty string.
- HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LabelValue) Reset() { *m = LabelValue{} }
-func (m *LabelValue) String() string { return proto.CompactTextString(m) }
-func (*LabelValue) ProtoMessage() {}
-func (*LabelValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{4}
-}
-
-func (m *LabelValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelValue.Unmarshal(m, b)
-}
-func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic)
-}
-func (m *LabelValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelValue.Merge(m, src)
-}
-func (m *LabelValue) XXX_Size() int {
- return xxx_messageInfo_LabelValue.Size(m)
-}
-func (m *LabelValue) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelValue proto.InternalMessageInfo
-
-func (m *LabelValue) GetValue() string {
- if m != nil {
- return m.Value
- }
- return ""
-}
-
-func (m *LabelValue) GetHasValue() bool {
- if m != nil {
- return m.HasValue
- }
- return false
-}
-
-// A timestamped measurement.
-type Point struct {
- // The moment when this point was recorded. Inclusive.
- // If not specified, the timestamp will be decided by the backend.
- Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // The actual point value.
- //
- // Types that are valid to be assigned to Value:
- // *Point_Int64Value
- // *Point_DoubleValue
- // *Point_DistributionValue
- // *Point_SummaryValue
- Value isPoint_Value `protobuf_oneof:"value"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Point) Reset() { *m = Point{} }
-func (m *Point) String() string { return proto.CompactTextString(m) }
-func (*Point) ProtoMessage() {}
-func (*Point) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{5}
-}
-
-func (m *Point) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Point.Unmarshal(m, b)
-}
-func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Point.Marshal(b, m, deterministic)
-}
-func (m *Point) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Point.Merge(m, src)
-}
-func (m *Point) XXX_Size() int {
- return xxx_messageInfo_Point.Size(m)
-}
-func (m *Point) XXX_DiscardUnknown() {
- xxx_messageInfo_Point.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Point proto.InternalMessageInfo
-
-func (m *Point) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
- }
- return nil
-}
-
-type isPoint_Value interface {
- isPoint_Value()
-}
-
-type Point_Int64Value struct {
- Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
-}
-
-type Point_DoubleValue struct {
- DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
-}
-
-type Point_DistributionValue struct {
- DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
-}
-
-type Point_SummaryValue struct {
- SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"`
-}
-
-func (*Point_Int64Value) isPoint_Value() {}
-
-func (*Point_DoubleValue) isPoint_Value() {}
-
-func (*Point_DistributionValue) isPoint_Value() {}
-
-func (*Point_SummaryValue) isPoint_Value() {}
-
-func (m *Point) GetValue() isPoint_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Point) GetInt64Value() int64 {
- if x, ok := m.GetValue().(*Point_Int64Value); ok {
- return x.Int64Value
- }
- return 0
-}
-
-func (m *Point) GetDoubleValue() float64 {
- if x, ok := m.GetValue().(*Point_DoubleValue); ok {
- return x.DoubleValue
- }
- return 0
-}
-
-func (m *Point) GetDistributionValue() *DistributionValue {
- if x, ok := m.GetValue().(*Point_DistributionValue); ok {
- return x.DistributionValue
- }
- return nil
-}
-
-func (m *Point) GetSummaryValue() *SummaryValue {
- if x, ok := m.GetValue().(*Point_SummaryValue); ok {
- return x.SummaryValue
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Point) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Point_Int64Value)(nil),
- (*Point_DoubleValue)(nil),
- (*Point_DistributionValue)(nil),
- (*Point_SummaryValue)(nil),
- }
-}
-
-// Distribution contains summary statistics for a population of values. It
-// optionally contains a histogram representing the distribution of those
-// values across a set of buckets.
-type DistributionValue struct {
- // The number of values in the population. Must be non-negative. This value
- // must equal the sum of the values in bucket_counts if a histogram is
- // provided.
- Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
- // The sum of the values in the population. If count is zero then this field
- // must be zero.
- Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"`
- // The sum of squared deviations from the mean of the values in the
- // population. For values x_i this is:
- //
- // Sum[i=1..n]((x_i - mean)^2)
- //
- // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
- // describes Welford's method for accumulating this sum in one pass.
- //
- // If count is zero then this field must be zero.
- SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"`
- // Don't change bucket boundaries within a TimeSeries if your backend doesn't
- // support this.
- // TODO(issue #152): consider not required to send bucket options for
- // optimization.
- BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"`
- // If the distribution does not have a histogram, then omit this field.
- // If there is a histogram, then the sum of the values in the Bucket counts
- // must equal the value in the count field of the distribution.
- Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue) Reset() { *m = DistributionValue{} }
-func (m *DistributionValue) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue) ProtoMessage() {}
-func (*DistributionValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6}
-}
-
-func (m *DistributionValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue.Unmarshal(m, b)
-}
-func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue.Merge(m, src)
-}
-func (m *DistributionValue) XXX_Size() int {
- return xxx_messageInfo_DistributionValue.Size(m)
-}
-func (m *DistributionValue) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue proto.InternalMessageInfo
-
-func (m *DistributionValue) GetCount() int64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *DistributionValue) GetSum() float64 {
- if m != nil {
- return m.Sum
- }
- return 0
-}
-
-func (m *DistributionValue) GetSumOfSquaredDeviation() float64 {
- if m != nil {
- return m.SumOfSquaredDeviation
- }
- return 0
-}
-
-func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions {
- if m != nil {
- return m.BucketOptions
- }
- return nil
-}
-
-func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket {
- if m != nil {
- return m.Buckets
- }
- return nil
-}
-
-// A Distribution may optionally contain a histogram of the values in the
-// population. The bucket boundaries for that histogram are described by
-// BucketOptions.
-//
-// If bucket_options has no type, then there is no histogram associated with
-// the Distribution.
-type DistributionValue_BucketOptions struct {
- // Types that are valid to be assigned to Type:
- // *DistributionValue_BucketOptions_Explicit_
- Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} }
-func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue_BucketOptions) ProtoMessage() {}
-func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6, 0}
-}
-
-func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b)
-}
-func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src)
-}
-func (m *DistributionValue_BucketOptions) XXX_Size() int {
- return xxx_messageInfo_DistributionValue_BucketOptions.Size(m)
-}
-func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo
-
-type isDistributionValue_BucketOptions_Type interface {
- isDistributionValue_BucketOptions_Type()
-}
-
-type DistributionValue_BucketOptions_Explicit_ struct {
- Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"`
-}
-
-func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {}
-
-func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type {
- if m != nil {
- return m.Type
- }
- return nil
-}
-
-func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit {
- if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok {
- return x.Explicit
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*DistributionValue_BucketOptions_Explicit_)(nil),
- }
-}
-
-// Specifies a set of buckets with arbitrary upper-bounds.
-// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket
-// index i are:
-//
-// [0, bucket_bounds[i]) for i == 0
-// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1
-// [bucket_bounds[i], +infinity) for i == N-1
-type DistributionValue_BucketOptions_Explicit struct {
- // The values must be strictly increasing and > 0.
- Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue_BucketOptions_Explicit) Reset() {
- *m = DistributionValue_BucketOptions_Explicit{}
-}
-func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {}
-func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0}
-}
-
-func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b)
-}
-func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src)
-}
-func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int {
- return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m)
-}
-func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo
-
-func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 {
- if m != nil {
- return m.Bounds
- }
- return nil
-}
-
-type DistributionValue_Bucket struct {
- // The number of values in each bucket of the histogram, as described in
- // bucket_bounds.
- Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
- // If the distribution does not have a histogram, then omit this field.
- Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} }
-func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue_Bucket) ProtoMessage() {}
-func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6, 1}
-}
-
-func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b)
-}
-func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue_Bucket.Merge(m, src)
-}
-func (m *DistributionValue_Bucket) XXX_Size() int {
- return xxx_messageInfo_DistributionValue_Bucket.Size(m)
-}
-func (m *DistributionValue_Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo
-
-func (m *DistributionValue_Bucket) GetCount() int64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar {
- if m != nil {
- return m.Exemplar
- }
- return nil
-}
-
-// Exemplars are example points that may be used to annotate aggregated
-// Distribution values. They are metadata that gives information about a
-// particular value added to a Distribution bucket.
-type DistributionValue_Exemplar struct {
- // Value of the exemplar point. It determines which bucket the exemplar
- // belongs to.
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
- // The observation (sampling) time of the above value.
- Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Contextual information about the example value.
- Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} }
-func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue_Exemplar) ProtoMessage() {}
-func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6, 2}
-}
-
-func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b)
-}
-func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src)
-}
-func (m *DistributionValue_Exemplar) XXX_Size() int {
- return xxx_messageInfo_DistributionValue_Exemplar.Size(m)
-}
-func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo
-
-func (m *DistributionValue_Exemplar) GetValue() float64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
- }
- return nil
-}
-
-func (m *DistributionValue_Exemplar) GetAttachments() map[string]string {
- if m != nil {
- return m.Attachments
- }
- return nil
-}
-
-// The start_timestamp only applies to the count and sum in the SummaryValue.
-type SummaryValue struct {
- // The total number of recorded values since start_time. Optional since
- // some systems don't expose this.
- Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
- // The total sum of recorded values since start_time. Optional since some
- // systems don't expose this. If count is zero then this field must be zero.
- // This field must be unset if the sum is not available.
- Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
- // Values calculated over an arbitrary time window.
- Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SummaryValue) Reset() { *m = SummaryValue{} }
-func (m *SummaryValue) String() string { return proto.CompactTextString(m) }
-func (*SummaryValue) ProtoMessage() {}
-func (*SummaryValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{7}
-}
-
-func (m *SummaryValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SummaryValue.Unmarshal(m, b)
-}
-func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic)
-}
-func (m *SummaryValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryValue.Merge(m, src)
-}
-func (m *SummaryValue) XXX_Size() int {
- return xxx_messageInfo_SummaryValue.Size(m)
-}
-func (m *SummaryValue) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryValue proto.InternalMessageInfo
-
-func (m *SummaryValue) GetCount() *wrappers.Int64Value {
- if m != nil {
- return m.Count
- }
- return nil
-}
-
-func (m *SummaryValue) GetSum() *wrappers.DoubleValue {
- if m != nil {
- return m.Sum
- }
- return nil
-}
-
-func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-// The values in this message can be reset at arbitrary unknown times, with
-// the requirement that all of them are reset at the same time.
-type SummaryValue_Snapshot struct {
- // The number of values in the snapshot. Optional since some systems don't
- // expose this.
- Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
- // The sum of values in the snapshot. Optional since some systems don't
- // expose this. If count is zero then this field must be zero or not set
- // (if not supported).
- Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
- // A list of values at different percentiles of the distribution calculated
- // from the current snapshot. The percentiles must be strictly increasing.
- PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} }
-func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) }
-func (*SummaryValue_Snapshot) ProtoMessage() {}
-func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{7, 0}
-}
-
-func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b)
-}
-func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic)
-}
-func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src)
-}
-func (m *SummaryValue_Snapshot) XXX_Size() int {
- return xxx_messageInfo_SummaryValue_Snapshot.Size(m)
-}
-func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo
-
-func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value {
- if m != nil {
- return m.Count
- }
- return nil
-}
-
-func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue {
- if m != nil {
- return m.Sum
- }
- return nil
-}
-
-func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile {
- if m != nil {
- return m.PercentileValues
- }
- return nil
-}
-
-// Represents the value at a given percentile of a distribution.
-type SummaryValue_Snapshot_ValueAtPercentile struct {
- // The percentile of a distribution. Must be in the interval
- // (0.0, 100.0].
- Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"`
- // The value at the given percentile of a distribution.
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() {
- *m = SummaryValue_Snapshot_ValueAtPercentile{}
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) }
-func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {}
-func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0}
-}
-
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b)
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic)
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src)
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int {
- return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m)
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo
-
-func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 {
- if m != nil {
- return m.Percentile
- }
- return 0
-}
-
-func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value)
- proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric")
- proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor")
- proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey")
- proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries")
- proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue")
- proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point")
- proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue")
- proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions")
- proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit")
- proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket")
- proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar")
- proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry")
- proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue")
- proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot")
- proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a)
-}
-
-var fileDescriptor_0ee3deb72053811a = []byte{
- // 1098 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5,
- 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xdb, 0xf5, 0xa8, 0xed, 0xdf, 0xda, 0xfc, 0x15, 0xc2,
- 0x22, 0x20, 0x15, 0xca, 0x5a, 0x31, 0xa5, 0xad, 0x2a, 0x54, 0x14, 0xc7, 0x6e, 0x62, 0xc8, 0x87,
- 0x35, 0xb6, 0x2b, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x35, 0xf8,
- 0x05, 0x78, 0x04, 0xae, 0xb9, 0x45, 0x3c, 0x07, 0x57, 0x3c, 0x01, 0x4f, 0x81, 0x78, 0x03, 0xb4,
- 0x33, 0xb3, 0x1f, 0x89, 0xc1, 0xd4, 0x45, 0xe2, 0xee, 0x9c, 0x33, 0xe7, 0xfc, 0xfc, 0x3b, 0x9f,
- 0x5e, 0x78, 0xe4, 0x07, 0xc4, 0xb3, 0x89, 0x47, 0x23, 0xda, 0x08, 0x42, 0x9f, 0xf9, 0x0d, 0x97,
- 0xb0, 0xd0, 0xb1, 0x69, 0x63, 0xb6, 0x9f, 0x88, 0x26, 0x7f, 0x40, 0x5b, 0x99, 0xab, 0xb0, 0x98,
- 0xc9, 0xfb, 0x6c, 0x5f, 0x7f, 0xef, 0xd2, 0xf7, 0x2f, 0xa7, 0x44, 0x60, 0x8c, 0xa3, 0x8b, 0x06,
- 0x73, 0x5c, 0x42, 0x99, 0xe5, 0x06, 0xc2, 0x57, 0xdf, 0xbe, 0xed, 0xf0, 0x6d, 0x68, 0x05, 0x01,
- 0x09, 0x25, 0x96, 0xfe, 0xc9, 0x02, 0x91, 0x90, 0x50, 0x3f, 0x0a, 0x6d, 0x12, 0x33, 0x49, 0x64,
- 0xe1, 0x6c, 0xfc, 0xa1, 0x40, 0xf9, 0x94, 0xff, 0x38, 0x7a, 0x0d, 0x35, 0x41, 0x63, 0x34, 0x21,
- 0xd4, 0x0e, 0x9d, 0x80, 0xf9, 0x61, 0x5d, 0xd9, 0x51, 0x76, 0xd5, 0xe6, 0x9e, 0xb9, 0x84, 0xb1,
- 0x29, 0xe2, 0xdb, 0x69, 0x10, 0xd6, 0xdc, 0x5b, 0x16, 0x74, 0x04, 0xc0, 0xd3, 0x20, 0xa1, 0x43,
- 0x68, 0xbd, 0xb0, 0x53, 0xdc, 0x55, 0x9b, 0x1f, 0x2f, 0x05, 0x1d, 0x38, 0x2e, 0xe9, 0x73, 0x77,
- 0x9c, 0x0b, 0x45, 0x2d, 0xa8, 0x24, 0x19, 0xd4, 0x8b, 0x9c, 0xdb, 0x47, 0x8b, 0x30, 0x69, 0x8e,
- 0xb3, 0x7d, 0x13, 0x4b, 0x19, 0xa7, 0x71, 0xc6, 0x0f, 0x45, 0xd0, 0x6e, 0x73, 0x46, 0x08, 0x4a,
- 0x9e, 0xe5, 0x12, 0x9e, 0xf0, 0x26, 0xe6, 0x32, 0xda, 0x01, 0x35, 0x29, 0x85, 0xe3, 0x7b, 0xf5,
- 0x02, 0x7f, 0xca, 0x9b, 0xe2, 0xa8, 0xc8, 0x73, 0x18, 0xa7, 0xb2, 0x89, 0xb9, 0x8c, 0x5e, 0x42,
- 0x89, 0xcd, 0x03, 0x52, 0x2f, 0xed, 0x28, 0xbb, 0x77, 0x9b, 0xcd, 0x95, 0x4a, 0x67, 0x0e, 0xe6,
- 0x01, 0xc1, 0x3c, 0x1e, 0xb5, 0x01, 0xa6, 0xd6, 0x98, 0x4c, 0x47, 0xd7, 0x64, 0x4e, 0xeb, 0xeb,
- 0xbc, 0x66, 0x1f, 0x2e, 0x45, 0x3b, 0x89, 0xdd, 0xbf, 0x22, 0x73, 0xbc, 0x39, 0x95, 0x12, 0x35,
- 0x7e, 0x52, 0xa0, 0x14, 0x83, 0xa2, 0x7b, 0xa0, 0x0e, 0xcf, 0xfa, 0xbd, 0xce, 0x61, 0xf7, 0x65,
- 0xb7, 0xd3, 0xd6, 0xd6, 0x62, 0xc3, 0xd1, 0xc1, 0xf0, 0xa8, 0x33, 0xea, 0x9e, 0x0d, 0x9e, 0x3c,
- 0xd6, 0x14, 0xa4, 0x41, 0x55, 0x18, 0xda, 0xe7, 0xc3, 0xd6, 0x49, 0x47, 0x2b, 0xa0, 0x87, 0x80,
- 0xa4, 0xa5, 0xdb, 0x1f, 0xe0, 0x6e, 0x6b, 0x38, 0xe8, 0x9e, 0x9f, 0x69, 0x45, 0x74, 0x1f, 0xb4,
- 0xc3, 0xe1, 0xe9, 0xf0, 0xe4, 0x60, 0xd0, 0x7d, 0x95, 0xc4, 0x97, 0xd0, 0x03, 0xa8, 0xe5, 0xac,
- 0x12, 0x64, 0x1d, 0x6d, 0xc1, 0xff, 0xf2, 0xe6, 0x3c, 0x52, 0x19, 0xa9, 0xb0, 0xd1, 0x1f, 0x9e,
- 0x9e, 0x1e, 0xe0, 0xaf, 0xb5, 0x0d, 0xe3, 0x05, 0x54, 0x92, 0x14, 0x90, 0x06, 0xc5, 0x6b, 0x32,
- 0x97, 0xed, 0x88, 0xc5, 0x7f, 0xee, 0x86, 0xf1, 0x9b, 0x02, 0x90, 0xcd, 0x0d, 0x3a, 0x84, 0x7b,
- 0x94, 0x59, 0x21, 0x1b, 0xa5, 0x1b, 0x24, 0xc7, 0x59, 0x37, 0xc5, 0x0a, 0x99, 0xc9, 0x0a, 0xf1,
- 0x69, 0xe3, 0x1e, 0xf8, 0x2e, 0x0f, 0x49, 0x75, 0xf4, 0x25, 0x54, 0x45, 0x17, 0x66, 0xd6, 0x34,
- 0x7a, 0xcb, 0xd9, 0xe5, 0x49, 0xbc, 0x8a, 0xfd, 0xb1, 0x3a, 0x4d, 0x65, 0x8a, 0x9e, 0x43, 0x39,
- 0xf0, 0x1d, 0x8f, 0xd1, 0x7a, 0x91, 0xa3, 0x18, 0x4b, 0x51, 0x7a, 0xb1, 0x2b, 0x96, 0x11, 0xc6,
- 0x17, 0x00, 0x19, 0x2c, 0xba, 0x0f, 0xeb, 0x9c, 0x8f, 0xac, 0x8f, 0x50, 0xd0, 0x16, 0x6c, 0x5e,
- 0x59, 0x54, 0x30, 0xe5, 0xf5, 0xa9, 0xe0, 0xca, 0x95, 0x45, 0x79, 0x88, 0xf1, 0x4b, 0x01, 0xd6,
- 0x39, 0x24, 0x7a, 0x06, 0x9b, 0xab, 0x54, 0x24, 0x73, 0x46, 0xef, 0x83, 0xea, 0x78, 0xec, 0xc9,
- 0xe3, 0xdc, 0x4f, 0x14, 0x8f, 0xd7, 0x30, 0x70, 0xa3, 0x60, 0xf6, 0x01, 0x54, 0x27, 0x7e, 0x34,
- 0x9e, 0x12, 0xe9, 0x13, 0x6f, 0x86, 0x72, 0xbc, 0x86, 0x55, 0x61, 0x15, 0x4e, 0x23, 0x40, 0x13,
- 0x87, 0xb2, 0xd0, 0x19, 0x47, 0x71, 0xe3, 0xa4, 0x6b, 0x89, 0x53, 0x31, 0x97, 0x16, 0xa5, 0x9d,
- 0x0b, 0xe3, 0x58, 0xc7, 0x6b, 0xb8, 0x36, 0xb9, 0x6d, 0x44, 0x3d, 0xb8, 0x43, 0x23, 0xd7, 0xb5,
- 0xc2, 0xb9, 0xc4, 0x5e, 0xe7, 0xd8, 0x8f, 0x96, 0x62, 0xf7, 0x45, 0x44, 0x02, 0x5b, 0xa5, 0x39,
- 0xbd, 0xb5, 0x21, 0x2b, 0x6e, 0xfc, 0x5a, 0x86, 0xda, 0x02, 0x8b, 0xb8, 0x21, 0xb6, 0x1f, 0x79,
- 0x8c, 0xd7, 0xb3, 0x88, 0x85, 0x12, 0x0f, 0x31, 0x8d, 0x5c, 0x5e, 0x27, 0x05, 0xc7, 0x22, 0x7a,
- 0x0a, 0x75, 0x1a, 0xb9, 0x23, 0xff, 0x62, 0x44, 0xdf, 0x44, 0x56, 0x48, 0x26, 0xa3, 0x09, 0x99,
- 0x39, 0x16, 0x9f, 0x68, 0x5e, 0x2a, 0xfc, 0x80, 0x46, 0xee, 0xf9, 0x45, 0x5f, 0xbc, 0xb6, 0x93,
- 0x47, 0x64, 0xc3, 0xdd, 0x71, 0x64, 0x5f, 0x13, 0x36, 0xf2, 0xf9, 0xb0, 0x53, 0x59, 0xae, 0xcf,
- 0x57, 0x2b, 0x97, 0xd9, 0xe2, 0x20, 0xe7, 0x02, 0x03, 0xdf, 0x19, 0xe7, 0x55, 0x74, 0x0e, 0x1b,
- 0xc2, 0x90, 0xdc, 0x9b, 0xcf, 0xde, 0x09, 0x1d, 0x27, 0x28, 0xfa, 0x8f, 0x0a, 0xdc, 0xb9, 0xf1,
- 0x8b, 0xc8, 0x86, 0x0a, 0xf9, 0x2e, 0x98, 0x3a, 0xb6, 0xc3, 0xe4, 0xec, 0x75, 0xfe, 0x4d, 0x06,
- 0x66, 0x47, 0x82, 0x1d, 0xaf, 0xe1, 0x14, 0x58, 0x37, 0xa0, 0x92, 0xd8, 0xd1, 0x43, 0x28, 0x8f,
- 0xfd, 0xc8, 0x9b, 0xd0, 0xba, 0xb2, 0x53, 0xdc, 0x55, 0xb0, 0xd4, 0x5a, 0x65, 0x71, 0xa6, 0x75,
- 0x0a, 0x65, 0x81, 0xf8, 0x37, 0x3d, 0xec, 0xc7, 0x84, 0x89, 0x1b, 0x4c, 0xad, 0x90, 0x37, 0x52,
- 0x6d, 0x3e, 0x5d, 0x91, 0x70, 0x47, 0x86, 0xe3, 0x14, 0x48, 0xff, 0xbe, 0x10, 0x33, 0x14, 0xca,
- 0xcd, 0x65, 0x56, 0x92, 0x65, 0xbe, 0xb1, 0xa5, 0x85, 0x55, 0xb6, 0xf4, 0x1b, 0x50, 0x2d, 0xc6,
- 0x2c, 0xfb, 0xca, 0x25, 0xd9, 0xad, 0x39, 0x7e, 0x47, 0xd2, 0xe6, 0x41, 0x06, 0xd5, 0xf1, 0x58,
- 0x38, 0xc7, 0x79, 0x70, 0xfd, 0x05, 0x68, 0xb7, 0x1d, 0xfe, 0xe2, 0x74, 0xa7, 0x19, 0x16, 0x72,
- 0xe7, 0xea, 0x79, 0xe1, 0x99, 0x62, 0xfc, 0x5e, 0x84, 0x6a, 0x7e, 0xef, 0xd0, 0x7e, 0xbe, 0x09,
- 0x6a, 0x73, 0x6b, 0x21, 0xe5, 0x6e, 0x7a, 0x6b, 0x92, 0x0e, 0x99, 0xd9, 0x96, 0xa9, 0xcd, 0xff,
- 0x2f, 0x04, 0xb4, 0xb3, 0xc3, 0x23, 0x76, 0xf0, 0x0c, 0x2a, 0xd4, 0xb3, 0x02, 0x7a, 0xe5, 0x33,
- 0xf9, 0x0d, 0xd1, 0x7c, 0xeb, 0xbb, 0x60, 0xf6, 0x65, 0x24, 0x4e, 0x31, 0xf4, 0x9f, 0x0b, 0x50,
- 0x49, 0xcc, 0xff, 0x05, 0xff, 0x37, 0x50, 0x0b, 0x48, 0x68, 0x13, 0x8f, 0x39, 0xc9, 0x99, 0x4d,
- 0xba, 0xdc, 0x5e, 0x3d, 0x11, 0x93, 0xab, 0x07, 0xac, 0x97, 0x42, 0x62, 0x2d, 0x83, 0x17, 0xff,
- 0x5c, 0x7a, 0x17, 0x6a, 0x0b, 0x6e, 0x68, 0x1b, 0x20, 0x73, 0x94, 0xc3, 0x9b, 0xb3, 0xdc, 0xec,
- 0x7a, 0x32, 0xd7, 0xad, 0x19, 0x6c, 0x3b, 0xfe, 0x32, 0x9a, 0xad, 0xaa, 0xf8, 0x2a, 0xa2, 0xbd,
- 0xf8, 0xa1, 0xa7, 0xbc, 0x6e, 0x5f, 0x3a, 0xec, 0x2a, 0x1a, 0x9b, 0xb6, 0xef, 0x36, 0x44, 0xcc,
- 0x9e, 0xe3, 0x51, 0x16, 0x46, 0xf1, 0xcc, 0xf1, 0xeb, 0xd8, 0xc8, 0xe0, 0xf6, 0xc4, 0x27, 0xef,
- 0x25, 0xf1, 0xf6, 0x2e, 0xf3, 0x9f, 0xe0, 0xe3, 0x32, 0x7f, 0xf8, 0xf4, 0xcf, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0x8e, 0xfc, 0xd7, 0x46, 0xa8, 0x0b, 0x00, 0x00,
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
deleted file mode 100644
index 38faa9fdf..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/resource/v1/resource.proto
-
-package v1
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// Resource information.
-type Resource struct {
- // Type identifier for the resource.
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- // Set of labels that describe the resource.
- Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Resource) Reset() { *m = Resource{} }
-func (m *Resource) String() string { return proto.CompactTextString(m) }
-func (*Resource) ProtoMessage() {}
-func (*Resource) Descriptor() ([]byte, []int) {
- return fileDescriptor_584700775a2fc762, []int{0}
-}
-
-func (m *Resource) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Resource.Unmarshal(m, b)
-}
-func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
-}
-func (m *Resource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Resource.Merge(m, src)
-}
-func (m *Resource) XXX_Size() int {
- return xxx_messageInfo_Resource.Size(m)
-}
-func (m *Resource) XXX_DiscardUnknown() {
- xxx_messageInfo_Resource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Resource proto.InternalMessageInfo
-
-func (m *Resource) GetType() string {
- if m != nil {
- return m.Type
- }
- return ""
-}
-
-func (m *Resource) GetLabels() map[string]string {
- if m != nil {
- return m.Labels
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource")
- proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762)
-}
-
-var fileDescriptor_584700775a2fc762 = []byte{
- // 234 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd,
- 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d,
- 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08,
- 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe,
- 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98,
- 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1,
- 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25,
- 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3,
- 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12,
- 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf,
- 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19,
- 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5,
- 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99,
- 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go
deleted file mode 100644
index 4de05355a..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go
+++ /dev/null
@@ -1,1543 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/trace/v1/trace.proto
-
-package v1
-
-import (
- fmt "fmt"
- v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- wrappers "github.com/golang/protobuf/ptypes/wrappers"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// Type of span. Can be used to specify additional relationships between spans
-// in addition to a parent/child relationship.
-type Span_SpanKind int32
-
-const (
- // Unspecified.
- Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
- // Indicates that the span covers server-side handling of an RPC or other
- // remote network request.
- Span_SERVER Span_SpanKind = 1
- // Indicates that the span covers the client-side wrapper around an RPC or
- // other remote request.
- Span_CLIENT Span_SpanKind = 2
-)
-
-var Span_SpanKind_name = map[int32]string{
- 0: "SPAN_KIND_UNSPECIFIED",
- 1: "SERVER",
- 2: "CLIENT",
-}
-
-var Span_SpanKind_value = map[string]int32{
- "SPAN_KIND_UNSPECIFIED": 0,
- "SERVER": 1,
- "CLIENT": 2,
-}
-
-func (x Span_SpanKind) String() string {
- return proto.EnumName(Span_SpanKind_name, int32(x))
-}
-
-func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 0}
-}
-
-// Indicates whether the message was sent or received.
-type Span_TimeEvent_MessageEvent_Type int32
-
-const (
- // Unknown event type.
- Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0
- // Indicates a sent message.
- Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1
- // Indicates a received message.
- Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2
-)
-
-var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "SENT",
- 2: "RECEIVED",
-}
-
-var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "SENT": 1,
- "RECEIVED": 2,
-}
-
-func (x Span_TimeEvent_MessageEvent_Type) String() string {
- return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x))
-}
-
-func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0}
-}
-
-// The relationship of the current span relative to the linked span: child,
-// parent, or unspecified.
-type Span_Link_Type int32
-
-const (
- // The relationship of the two spans is unknown, or known but other
- // than parent-child.
- Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0
- // The linked span is a child of the current span.
- Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1
- // The linked span is a parent of the current span.
- Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2
-)
-
-var Span_Link_Type_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "CHILD_LINKED_SPAN",
- 2: "PARENT_LINKED_SPAN",
-}
-
-var Span_Link_Type_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "CHILD_LINKED_SPAN": 1,
- "PARENT_LINKED_SPAN": 2,
-}
-
-func (x Span_Link_Type) String() string {
- return proto.EnumName(Span_Link_Type_name, int32(x))
-}
-
-func (Span_Link_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0}
-}
-
-// A span represents a single operation within a trace. Spans can be
-// nested to form a trace tree. Spans may also be linked to other spans
-// from the same or different trace. And form graphs. Often, a trace
-// contains a root span that describes the end-to-end latency, and one
-// or more subspans for its sub-operations. A trace can also contain
-// multiple root spans, or none at all. Spans do not need to be
-// contiguous - there may be gaps or overlaps between spans in a trace.
-//
-// The next id is 17.
-// TODO(bdrutu): Add an example.
-type Span struct {
- // A unique identifier for a trace. All spans from the same trace share
- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes
- // is considered invalid.
- //
- // This field is semantically required. Receiver should generate new
- // random trace_id if empty or invalid trace_id was received.
- //
- // This field is required.
- TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
- // A unique identifier for a span within a trace, assigned when the span
- // is created. The ID is an 8-byte array. An ID with all zeroes is considered
- // invalid.
- //
- // This field is semantically required. Receiver should generate new
- // random span_id if empty or invalid span_id was received.
- //
- // This field is required.
- SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
- // The Tracestate on the span.
- Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"`
- // The `span_id` of this span's parent span. If this is a root span, then this
- // field must be empty. The ID is an 8-byte array.
- ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
- // A description of the span's operation.
- //
- // For example, the name can be a qualified method name or a file name
- // and a line number where the operation is called. A best practice is to use
- // the same display name at the same call point in an application.
- // This makes it easier to correlate spans in different traces.
- //
- // This field is semantically required to be set to non-empty string.
- // When null or empty string received - receiver may use string "name"
- // as a replacement. There might be smarted algorithms implemented by
- // receiver to fix the empty span name.
- //
- // This field is required.
- Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
- // Distinguishes between spans generated in a particular context. For example,
- // two spans with the same name may be distinguished using `CLIENT` (caller)
- // and `SERVER` (callee) to identify queueing latency associated with the span.
- Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
- // The start time of the span. On the client side, this is the time kept by
- // the local machine where the span execution starts. On the server side, this
- // is the time when the server's application handler starts running.
- //
- // This field is semantically required. When not set on receive -
- // receiver should set it to the value of end_time field if it was
- // set. Or to the current time if neither was set. It is important to
- // keep end_time > start_time for consistency.
- //
- // This field is required.
- StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // The end time of the span. On the client side, this is the time kept by
- // the local machine where the span execution ends. On the server side, this
- // is the time when the server application handler stops running.
- //
- // This field is semantically required. When not set on receive -
- // receiver should set it to start_time value. It is important to
- // keep end_time > start_time for consistency.
- //
- // This field is required.
- EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
- // A set of attributes on the span.
- Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
- // A stack trace captured at the start of the span.
- StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"`
- // The included time events.
- TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"`
- // The included links.
- Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"`
- // An optional final status for this span. Semantically when Status
- // wasn't set it is means span ended without errors and assume
- // Status.Ok (code = 0).
- Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"`
- // An optional resource that is associated with this span. If not set, this span
- // should be part of a batch that does include the resource information, unless resource
- // information is unknown.
- Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"`
- // A highly recommended but not required flag that identifies when a
- // trace crosses a process boundary. True when the parent_span belongs
- // to the same process as the current span. This flag is most commonly
- // used to indicate the need to adjust time as clocks in different
- // processes may not be synchronized.
- SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
- // An optional number of child spans that were generated while this span
- // was active. If set, allows an implementation to detect missing child spans.
- ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span) Reset() { *m = Span{} }
-func (m *Span) String() string { return proto.CompactTextString(m) }
-func (*Span) ProtoMessage() {}
-func (*Span) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0}
-}
-
-func (m *Span) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span.Unmarshal(m, b)
-}
-func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span.Marshal(b, m, deterministic)
-}
-func (m *Span) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span.Merge(m, src)
-}
-func (m *Span) XXX_Size() int {
- return xxx_messageInfo_Span.Size(m)
-}
-func (m *Span) XXX_DiscardUnknown() {
- xxx_messageInfo_Span.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span proto.InternalMessageInfo
-
-func (m *Span) GetTraceId() []byte {
- if m != nil {
- return m.TraceId
- }
- return nil
-}
-
-func (m *Span) GetSpanId() []byte {
- if m != nil {
- return m.SpanId
- }
- return nil
-}
-
-func (m *Span) GetTracestate() *Span_Tracestate {
- if m != nil {
- return m.Tracestate
- }
- return nil
-}
-
-func (m *Span) GetParentSpanId() []byte {
- if m != nil {
- return m.ParentSpanId
- }
- return nil
-}
-
-func (m *Span) GetName() *TruncatableString {
- if m != nil {
- return m.Name
- }
- return nil
-}
-
-func (m *Span) GetKind() Span_SpanKind {
- if m != nil {
- return m.Kind
- }
- return Span_SPAN_KIND_UNSPECIFIED
-}
-
-func (m *Span) GetStartTime() *timestamp.Timestamp {
- if m != nil {
- return m.StartTime
- }
- return nil
-}
-
-func (m *Span) GetEndTime() *timestamp.Timestamp {
- if m != nil {
- return m.EndTime
- }
- return nil
-}
-
-func (m *Span) GetAttributes() *Span_Attributes {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span) GetStackTrace() *StackTrace {
- if m != nil {
- return m.StackTrace
- }
- return nil
-}
-
-func (m *Span) GetTimeEvents() *Span_TimeEvents {
- if m != nil {
- return m.TimeEvents
- }
- return nil
-}
-
-func (m *Span) GetLinks() *Span_Links {
- if m != nil {
- return m.Links
- }
- return nil
-}
-
-func (m *Span) GetStatus() *Status {
- if m != nil {
- return m.Status
- }
- return nil
-}
-
-func (m *Span) GetResource() *v1.Resource {
- if m != nil {
- return m.Resource
- }
- return nil
-}
-
-func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue {
- if m != nil {
- return m.SameProcessAsParentSpan
- }
- return nil
-}
-
-func (m *Span) GetChildSpanCount() *wrappers.UInt32Value {
- if m != nil {
- return m.ChildSpanCount
- }
- return nil
-}
-
-// This field conveys information about request position in multiple distributed tracing graphs.
-// It is a list of Tracestate.Entry with a maximum of 32 members in the list.
-//
-// See the https://github.com/w3c/distributed-tracing for more details about this field.
-type Span_Tracestate struct {
- // A list of entries that represent the Tracestate.
- Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} }
-func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) }
-func (*Span_Tracestate) ProtoMessage() {}
-func (*Span_Tracestate) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 0}
-}
-
-func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b)
-}
-func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic)
-}
-func (m *Span_Tracestate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Tracestate.Merge(m, src)
-}
-func (m *Span_Tracestate) XXX_Size() int {
- return xxx_messageInfo_Span_Tracestate.Size(m)
-}
-func (m *Span_Tracestate) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Tracestate.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo
-
-func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry {
- if m != nil {
- return m.Entries
- }
- return nil
-}
-
-type Span_Tracestate_Entry struct {
- // The key must begin with a lowercase letter, and can only contain
- // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes
- // '-', asterisks '*', and forward slashes '/'.
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // The value is opaque string up to 256 characters printable ASCII
- // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='.
- // Note that this also excludes tabs, newlines, carriage returns, etc.
- Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} }
-func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) }
-func (*Span_Tracestate_Entry) ProtoMessage() {}
-func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0}
-}
-
-func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b)
-}
-func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic)
-}
-func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src)
-}
-func (m *Span_Tracestate_Entry) XXX_Size() int {
- return xxx_messageInfo_Span_Tracestate_Entry.Size(m)
-}
-func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo
-
-func (m *Span_Tracestate_Entry) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *Span_Tracestate_Entry) GetValue() string {
- if m != nil {
- return m.Value
- }
- return ""
-}
-
-// A set of attributes, each with a key and a value.
-type Span_Attributes struct {
- // The set of attributes. The value can be a string, an integer, a double
- // or the Boolean values `true` or `false`. Note, global attributes like
- // server name can be set as tags using resource API. Examples of attributes:
- //
- // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
- // "/http/server_latency": 300
- // "abc.com/myattribute": true
- // "abc.com/score": 10.239
- AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // The number of attributes that were discarded. Attributes can be discarded
- // because their keys are too long or because there are too many attributes.
- // If this value is 0, then no attributes were dropped.
- DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Attributes) Reset() { *m = Span_Attributes{} }
-func (m *Span_Attributes) String() string { return proto.CompactTextString(m) }
-func (*Span_Attributes) ProtoMessage() {}
-func (*Span_Attributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 1}
-}
-
-func (m *Span_Attributes) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Attributes.Unmarshal(m, b)
-}
-func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic)
-}
-func (m *Span_Attributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Attributes.Merge(m, src)
-}
-func (m *Span_Attributes) XXX_Size() int {
- return xxx_messageInfo_Span_Attributes.Size(m)
-}
-func (m *Span_Attributes) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Attributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo
-
-func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue {
- if m != nil {
- return m.AttributeMap
- }
- return nil
-}
-
-func (m *Span_Attributes) GetDroppedAttributesCount() int32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-// A time-stamped annotation or message event in the Span.
-type Span_TimeEvent struct {
- // The time the event occurred.
- Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
- // A `TimeEvent` can contain either an `Annotation` object or a
- // `MessageEvent` object, but not both.
- //
- // Types that are valid to be assigned to Value:
- // *Span_TimeEvent_Annotation_
- // *Span_TimeEvent_MessageEvent_
- Value isSpan_TimeEvent_Value `protobuf_oneof:"value"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} }
-func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvent) ProtoMessage() {}
-func (*Span_TimeEvent) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 2}
-}
-
-func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b)
-}
-func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic)
-}
-func (m *Span_TimeEvent) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_TimeEvent.Merge(m, src)
-}
-func (m *Span_TimeEvent) XXX_Size() int {
- return xxx_messageInfo_Span_TimeEvent.Size(m)
-}
-func (m *Span_TimeEvent) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo
-
-func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp {
- if m != nil {
- return m.Time
- }
- return nil
-}
-
-type isSpan_TimeEvent_Value interface {
- isSpan_TimeEvent_Value()
-}
-
-type Span_TimeEvent_Annotation_ struct {
- Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"`
-}
-
-type Span_TimeEvent_MessageEvent_ struct {
- MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"`
-}
-
-func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {}
-
-func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {}
-
-func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation {
- if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok {
- return x.Annotation
- }
- return nil
-}
-
-func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent {
- if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok {
- return x.MessageEvent
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Span_TimeEvent_Annotation_)(nil),
- (*Span_TimeEvent_MessageEvent_)(nil),
- }
-}
-
-// A text annotation with a set of attributes.
-type Span_TimeEvent_Annotation struct {
- // A user-supplied message describing the event.
- Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
- // A set of attributes on the annotation.
- Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} }
-func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvent_Annotation) ProtoMessage() {}
-func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0}
-}
-
-func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b)
-}
-func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic)
-}
-func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src)
-}
-func (m *Span_TimeEvent_Annotation) XXX_Size() int {
- return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m)
-}
-func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo
-
-func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString {
- if m != nil {
- return m.Description
- }
- return nil
-}
-
-func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-// An event describing a message sent/received between Spans.
-type Span_TimeEvent_MessageEvent struct {
- // The type of MessageEvent. Indicates whether the message was sent or
- // received.
- Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"`
- // An identifier for the MessageEvent's message that can be used to match
- // SENT and RECEIVED MessageEvents. For example, this field could
- // represent a sequence ID for a streaming RPC. It is recommended to be
- // unique within a Span.
- Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
- // The number of uncompressed bytes sent or received.
- UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"`
- // The number of compressed bytes sent or received. If zero, assumed to
- // be the same size as uncompressed.
- CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} }
-func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvent_MessageEvent) ProtoMessage() {}
-func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1}
-}
-
-func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b)
-}
-func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic)
-}
-func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src)
-}
-func (m *Span_TimeEvent_MessageEvent) XXX_Size() int {
- return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m)
-}
-func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo
-
-func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type {
- if m != nil {
- return m.Type
- }
- return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED
-}
-
-func (m *Span_TimeEvent_MessageEvent) GetId() uint64 {
- if m != nil {
- return m.Id
- }
- return 0
-}
-
-func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 {
- if m != nil {
- return m.UncompressedSize
- }
- return 0
-}
-
-func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 {
- if m != nil {
- return m.CompressedSize
- }
- return 0
-}
-
-// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation
-// on the span, consisting of either user-supplied key-value pairs, or
-// details of a message sent/received between Spans.
-type Span_TimeEvents struct {
- // A collection of `TimeEvent`s.
- TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"`
- // The number of dropped annotations in all the included time events.
- // If the value is 0, then no annotations were dropped.
- DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"`
- // The number of dropped message events in all the included time events.
- // If the value is 0, then no message events were dropped.
- DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} }
-func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvents) ProtoMessage() {}
-func (*Span_TimeEvents) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 3}
-}
-
-func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b)
-}
-func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic)
-}
-func (m *Span_TimeEvents) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_TimeEvents.Merge(m, src)
-}
-func (m *Span_TimeEvents) XXX_Size() int {
- return xxx_messageInfo_Span_TimeEvents.Size(m)
-}
-func (m *Span_TimeEvents) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo
-
-func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent {
- if m != nil {
- return m.TimeEvent
- }
- return nil
-}
-
-func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 {
- if m != nil {
- return m.DroppedAnnotationsCount
- }
- return 0
-}
-
-func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
- if m != nil {
- return m.DroppedMessageEventsCount
- }
- return 0
-}
-
-// A pointer from the current span to another span in the same trace or in a
-// different trace. For example, this can be used in batching operations,
-// where a single batch handler processes multiple requests from different
-// traces or when the handler receives a request from a different project.
-type Span_Link struct {
- // A unique identifier of a trace that this linked span is part of. The ID is a
- // 16-byte array.
- TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
- // A unique identifier for the linked span. The ID is an 8-byte array.
- SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
- // The relationship of the current span relative to the linked span.
- Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"`
- // A set of attributes on the link.
- Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Link) Reset() { *m = Span_Link{} }
-func (m *Span_Link) String() string { return proto.CompactTextString(m) }
-func (*Span_Link) ProtoMessage() {}
-func (*Span_Link) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 4}
-}
-
-func (m *Span_Link) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Link.Unmarshal(m, b)
-}
-func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic)
-}
-func (m *Span_Link) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Link.Merge(m, src)
-}
-func (m *Span_Link) XXX_Size() int {
- return xxx_messageInfo_Span_Link.Size(m)
-}
-func (m *Span_Link) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Link.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Link proto.InternalMessageInfo
-
-func (m *Span_Link) GetTraceId() []byte {
- if m != nil {
- return m.TraceId
- }
- return nil
-}
-
-func (m *Span_Link) GetSpanId() []byte {
- if m != nil {
- return m.SpanId
- }
- return nil
-}
-
-func (m *Span_Link) GetType() Span_Link_Type {
- if m != nil {
- return m.Type
- }
- return Span_Link_TYPE_UNSPECIFIED
-}
-
-func (m *Span_Link) GetAttributes() *Span_Attributes {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-// A collection of links, which are references from this span to a span
-// in the same or different trace.
-type Span_Links struct {
- // A collection of links.
- Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"`
- // The number of dropped links after the maximum size was enforced. If
- // this value is 0, then no links were dropped.
- DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Links) Reset() { *m = Span_Links{} }
-func (m *Span_Links) String() string { return proto.CompactTextString(m) }
-func (*Span_Links) ProtoMessage() {}
-func (*Span_Links) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 5}
-}
-
-func (m *Span_Links) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Links.Unmarshal(m, b)
-}
-func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic)
-}
-func (m *Span_Links) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Links.Merge(m, src)
-}
-func (m *Span_Links) XXX_Size() int {
- return xxx_messageInfo_Span_Links.Size(m)
-}
-func (m *Span_Links) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Links.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Links proto.InternalMessageInfo
-
-func (m *Span_Links) GetLink() []*Span_Link {
- if m != nil {
- return m.Link
- }
- return nil
-}
-
-func (m *Span_Links) GetDroppedLinksCount() int32 {
- if m != nil {
- return m.DroppedLinksCount
- }
- return 0
-}
-
-// The `Status` type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs. This proto's fields
-// are a subset of those of
-// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto),
-// which is used by [gRPC](https://github.com/grpc).
-type Status struct {
- // The status code. This is optional field. It is safe to assume 0 (OK)
- // when not set.
- Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- // A developer-facing error message, which should be in English.
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{1}
-}
-
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Status.Unmarshal(m, b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Status.Marshal(b, m, deterministic)
-}
-func (m *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(m, src)
-}
-func (m *Status) XXX_Size() int {
- return xxx_messageInfo_Status.Size(m)
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Status proto.InternalMessageInfo
-
-func (m *Status) GetCode() int32 {
- if m != nil {
- return m.Code
- }
- return 0
-}
-
-func (m *Status) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-// The value of an Attribute.
-type AttributeValue struct {
- // The type of the value.
- //
- // Types that are valid to be assigned to Value:
- // *AttributeValue_StringValue
- // *AttributeValue_IntValue
- // *AttributeValue_BoolValue
- // *AttributeValue_DoubleValue
- Value isAttributeValue_Value `protobuf_oneof:"value"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AttributeValue) Reset() { *m = AttributeValue{} }
-func (m *AttributeValue) String() string { return proto.CompactTextString(m) }
-func (*AttributeValue) ProtoMessage() {}
-func (*AttributeValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{2}
-}
-
-func (m *AttributeValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AttributeValue.Unmarshal(m, b)
-}
-func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic)
-}
-func (m *AttributeValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AttributeValue.Merge(m, src)
-}
-func (m *AttributeValue) XXX_Size() int {
- return xxx_messageInfo_AttributeValue.Size(m)
-}
-func (m *AttributeValue) XXX_DiscardUnknown() {
- xxx_messageInfo_AttributeValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AttributeValue proto.InternalMessageInfo
-
-type isAttributeValue_Value interface {
- isAttributeValue_Value()
-}
-
-type AttributeValue_StringValue struct {
- StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
-}
-
-type AttributeValue_IntValue struct {
- IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"`
-}
-
-type AttributeValue_BoolValue struct {
- BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"`
-}
-
-type AttributeValue_DoubleValue struct {
- DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
-}
-
-func (*AttributeValue_StringValue) isAttributeValue_Value() {}
-
-func (*AttributeValue_IntValue) isAttributeValue_Value() {}
-
-func (*AttributeValue_BoolValue) isAttributeValue_Value() {}
-
-func (*AttributeValue_DoubleValue) isAttributeValue_Value() {}
-
-func (m *AttributeValue) GetValue() isAttributeValue_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *AttributeValue) GetStringValue() *TruncatableString {
- if x, ok := m.GetValue().(*AttributeValue_StringValue); ok {
- return x.StringValue
- }
- return nil
-}
-
-func (m *AttributeValue) GetIntValue() int64 {
- if x, ok := m.GetValue().(*AttributeValue_IntValue); ok {
- return x.IntValue
- }
- return 0
-}
-
-func (m *AttributeValue) GetBoolValue() bool {
- if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok {
- return x.BoolValue
- }
- return false
-}
-
-func (m *AttributeValue) GetDoubleValue() float64 {
- if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok {
- return x.DoubleValue
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*AttributeValue) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*AttributeValue_StringValue)(nil),
- (*AttributeValue_IntValue)(nil),
- (*AttributeValue_BoolValue)(nil),
- (*AttributeValue_DoubleValue)(nil),
- }
-}
-
-// The call stack which originated this span.
-type StackTrace struct {
- // Stack frames in this stack trace.
- StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"`
- // The hash ID is used to conserve network bandwidth for duplicate
- // stack traces within a single trace.
- //
- // Often multiple spans will have identical stack traces.
- // The first occurrence of a stack trace should contain both
- // `stack_frames` and a value in `stack_trace_hash_id`.
- //
- // Subsequent spans within the same request can refer
- // to that stack trace by setting only `stack_trace_hash_id`.
- //
- // TODO: describe how to deal with the case where stack_trace_hash_id is
- // zero because it was not set.
- StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StackTrace) Reset() { *m = StackTrace{} }
-func (m *StackTrace) String() string { return proto.CompactTextString(m) }
-func (*StackTrace) ProtoMessage() {}
-func (*StackTrace) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{3}
-}
-
-func (m *StackTrace) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StackTrace.Unmarshal(m, b)
-}
-func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic)
-}
-func (m *StackTrace) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StackTrace.Merge(m, src)
-}
-func (m *StackTrace) XXX_Size() int {
- return xxx_messageInfo_StackTrace.Size(m)
-}
-func (m *StackTrace) XXX_DiscardUnknown() {
- xxx_messageInfo_StackTrace.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StackTrace proto.InternalMessageInfo
-
-func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames {
- if m != nil {
- return m.StackFrames
- }
- return nil
-}
-
-func (m *StackTrace) GetStackTraceHashId() uint64 {
- if m != nil {
- return m.StackTraceHashId
- }
- return 0
-}
-
-// A single stack frame in a stack trace.
-type StackTrace_StackFrame struct {
- // The fully-qualified name that uniquely identifies the function or
- // method that is active in this frame.
- FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"`
- // An un-mangled function name, if `function_name` is
- // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can
- // be fully qualified.
- OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"`
- // The name of the source file where the function call appears.
- FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
- // The line number in `file_name` where the function call appears.
- LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"`
- // The column number where the function call appears, if available.
- // This is important in JavaScript because of its anonymous functions.
- ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"`
- // The binary module from where the code was loaded.
- LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"`
- // The version of the deployed source code.
- SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} }
-func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) }
-func (*StackTrace_StackFrame) ProtoMessage() {}
-func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{3, 0}
-}
-
-func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b)
-}
-func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic)
-}
-func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StackTrace_StackFrame.Merge(m, src)
-}
-func (m *StackTrace_StackFrame) XXX_Size() int {
- return xxx_messageInfo_StackTrace_StackFrame.Size(m)
-}
-func (m *StackTrace_StackFrame) XXX_DiscardUnknown() {
- xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo
-
-func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString {
- if m != nil {
- return m.FunctionName
- }
- return nil
-}
-
-func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString {
- if m != nil {
- return m.OriginalFunctionName
- }
- return nil
-}
-
-func (m *StackTrace_StackFrame) GetFileName() *TruncatableString {
- if m != nil {
- return m.FileName
- }
- return nil
-}
-
-func (m *StackTrace_StackFrame) GetLineNumber() int64 {
- if m != nil {
- return m.LineNumber
- }
- return 0
-}
-
-func (m *StackTrace_StackFrame) GetColumnNumber() int64 {
- if m != nil {
- return m.ColumnNumber
- }
- return 0
-}
-
-func (m *StackTrace_StackFrame) GetLoadModule() *Module {
- if m != nil {
- return m.LoadModule
- }
- return nil
-}
-
-func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString {
- if m != nil {
- return m.SourceVersion
- }
- return nil
-}
-
-// A collection of stack frames, which can be truncated.
-type StackTrace_StackFrames struct {
- // Stack frames in this call stack.
- Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"`
- // The number of stack frames that were dropped because there
- // were too many stack frames.
- // If this value is 0, then no stack frames were dropped.
- DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} }
-func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) }
-func (*StackTrace_StackFrames) ProtoMessage() {}
-func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{3, 1}
-}
-
-func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b)
-}
-func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic)
-}
-func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StackTrace_StackFrames.Merge(m, src)
-}
-func (m *StackTrace_StackFrames) XXX_Size() int {
- return xxx_messageInfo_StackTrace_StackFrames.Size(m)
-}
-func (m *StackTrace_StackFrames) XXX_DiscardUnknown() {
- xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo
-
-func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame {
- if m != nil {
- return m.Frame
- }
- return nil
-}
-
-func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 {
- if m != nil {
- return m.DroppedFramesCount
- }
- return 0
-}
-
-// A description of a binary module.
-type Module struct {
- // TODO: document the meaning of this field.
- // For example: main binary, kernel modules, and dynamic libraries
- // such as libc.so, sharedlib.so.
- Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"`
- // A unique identifier for the module, usually a hash of its
- // contents.
- BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Module) Reset() { *m = Module{} }
-func (m *Module) String() string { return proto.CompactTextString(m) }
-func (*Module) ProtoMessage() {}
-func (*Module) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{4}
-}
-
-func (m *Module) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Module.Unmarshal(m, b)
-}
-func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Module.Marshal(b, m, deterministic)
-}
-func (m *Module) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Module.Merge(m, src)
-}
-func (m *Module) XXX_Size() int {
- return xxx_messageInfo_Module.Size(m)
-}
-func (m *Module) XXX_DiscardUnknown() {
- xxx_messageInfo_Module.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Module proto.InternalMessageInfo
-
-func (m *Module) GetModule() *TruncatableString {
- if m != nil {
- return m.Module
- }
- return nil
-}
-
-func (m *Module) GetBuildId() *TruncatableString {
- if m != nil {
- return m.BuildId
- }
- return nil
-}
-
-// A string that might be shortened to a specified length.
-type TruncatableString struct {
- // The shortened string. For example, if the original string was 500 bytes long and
- // the limit of the string was 128 bytes, then this value contains the first 128
- // bytes of the 500-byte string. Note that truncation always happens on a
- // character boundary, to ensure that a truncated string is still valid UTF-8.
- // Because it may contain multi-byte characters, the size of the truncated string
- // may be less than the truncation limit.
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- // The number of bytes removed from the original string. If this
- // value is 0, then the string was not shortened.
- TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TruncatableString) Reset() { *m = TruncatableString{} }
-func (m *TruncatableString) String() string { return proto.CompactTextString(m) }
-func (*TruncatableString) ProtoMessage() {}
-func (*TruncatableString) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{5}
-}
-
-func (m *TruncatableString) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TruncatableString.Unmarshal(m, b)
-}
-func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic)
-}
-func (m *TruncatableString) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TruncatableString.Merge(m, src)
-}
-func (m *TruncatableString) XXX_Size() int {
- return xxx_messageInfo_TruncatableString.Size(m)
-}
-func (m *TruncatableString) XXX_DiscardUnknown() {
- xxx_messageInfo_TruncatableString.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TruncatableString proto.InternalMessageInfo
-
-func (m *TruncatableString) GetValue() string {
- if m != nil {
- return m.Value
- }
- return ""
-}
-
-func (m *TruncatableString) GetTruncatedByteCount() int32 {
- if m != nil {
- return m.TruncatedByteCount
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value)
- proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value)
- proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value)
- proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span")
- proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate")
- proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry")
- proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes")
- proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry")
- proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent")
- proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation")
- proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent")
- proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents")
- proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link")
- proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links")
- proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status")
- proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue")
- proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace")
- proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame")
- proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames")
- proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module")
- proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584)
-}
-
-var fileDescriptor_8ea38bbb821bf584 = []byte{
- // 1557 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xeb, 0x52, 0x1b, 0x47,
- 0x16, 0x66, 0x74, 0xd7, 0x91, 0x90, 0x45, 0x1b, 0xdb, 0x83, 0xd6, 0xbb, 0x66, 0x65, 0x7b, 0x17,
- 0xaf, 0x17, 0x61, 0xb0, 0xd7, 0xe5, 0x6b, 0x79, 0x11, 0x88, 0x48, 0x06, 0x2b, 0x72, 0x4b, 0xa6,
- 0x72, 0xa9, 0xd4, 0xd4, 0x48, 0xd3, 0x88, 0x09, 0x52, 0xcf, 0x64, 0xa6, 0x87, 0x14, 0x7e, 0x81,
- 0x54, 0x2a, 0xff, 0x52, 0x95, 0xca, 0x0b, 0xe4, 0x47, 0x5e, 0x24, 0x0f, 0x90, 0xca, 0x73, 0xe4,
- 0x09, 0xf2, 0x27, 0xd5, 0xdd, 0x73, 0x13, 0xd8, 0xa0, 0xc8, 0x7f, 0xa8, 0x9e, 0xee, 0xf3, 0x7d,
- 0x7d, 0x4e, 0x9f, 0x2b, 0x82, 0xdb, 0x96, 0x4d, 0xe8, 0x80, 0x50, 0xd7, 0x73, 0xd7, 0x6c, 0xc7,
- 0x62, 0xd6, 0x1a, 0x73, 0xf4, 0x01, 0x59, 0x3b, 0x5e, 0x97, 0x8b, 0x9a, 0xd8, 0x44, 0x4b, 0x91,
- 0x98, 0xdc, 0xa9, 0xc9, 0xd3, 0xe3, 0xf5, 0xca, 0xdd, 0x33, 0x0c, 0x0e, 0x71, 0x2d, 0xcf, 0x91,
- 0x24, 0xc1, 0x5a, 0xa2, 0x2a, 0x37, 0x86, 0x96, 0x35, 0x1c, 0x11, 0x29, 0xd8, 0xf7, 0x0e, 0xd6,
- 0x98, 0x39, 0x26, 0x2e, 0xd3, 0xc7, 0xb6, 0x2f, 0xf0, 0x8f, 0xd3, 0x02, 0x5f, 0x3b, 0xba, 0x6d,
- 0x13, 0xc7, 0xbf, 0xb6, 0xfa, 0xcb, 0x15, 0x48, 0x75, 0x6d, 0x9d, 0xa2, 0x25, 0xc8, 0x09, 0x15,
- 0x34, 0xd3, 0x50, 0x95, 0x65, 0x65, 0xa5, 0x88, 0xb3, 0xe2, 0xbb, 0x65, 0xa0, 0x6b, 0x90, 0x75,
- 0x6d, 0x9d, 0xf2, 0x93, 0x84, 0x38, 0xc9, 0xf0, 0xcf, 0x96, 0x81, 0x5e, 0x02, 0x08, 0x19, 0x97,
- 0xe9, 0x8c, 0xa8, 0x97, 0x96, 0x95, 0x95, 0xc2, 0xc6, 0x7f, 0x6a, 0xef, 0x35, 0xad, 0xc6, 0x2f,
- 0xaa, 0xf5, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x2d, 0x28, 0xd9, 0xba, 0x43, 0x28, 0xd3, 0x82, 0xbb,
- 0x92, 0xe2, 0xae, 0xa2, 0xdc, 0xed, 0xca, 0x1b, 0xff, 0x0f, 0x29, 0xaa, 0x8f, 0x89, 0x9a, 0x12,
- 0x77, 0xfd, 0xf7, 0x9c, 0xbb, 0x7a, 0x8e, 0x47, 0x07, 0x3a, 0xd3, 0xfb, 0x23, 0xd2, 0x65, 0x8e,
- 0x49, 0x87, 0x58, 0x20, 0xd1, 0x33, 0x48, 0x1d, 0x99, 0xd4, 0x50, 0x4b, 0xcb, 0xca, 0x4a, 0x69,
- 0x63, 0xe5, 0x22, 0x6d, 0xf9, 0x9f, 0x5d, 0x93, 0x1a, 0x58, 0xa0, 0xd0, 0x63, 0x00, 0x97, 0xe9,
- 0x0e, 0xd3, 0xf8, 0x3b, 0xab, 0x69, 0xa1, 0x45, 0xa5, 0x26, 0xdf, 0xb8, 0x16, 0xbc, 0x71, 0xad,
- 0x17, 0x38, 0x01, 0xe7, 0x85, 0x34, 0xff, 0x46, 0xff, 0x83, 0x1c, 0xa1, 0x86, 0x04, 0x66, 0x2e,
- 0x04, 0x66, 0x09, 0x35, 0x04, 0xec, 0x25, 0x80, 0xce, 0x98, 0x63, 0xf6, 0x3d, 0x46, 0x5c, 0x35,
- 0x3b, 0xdd, 0x1b, 0x6f, 0x86, 0x08, 0x1c, 0x43, 0xa3, 0x1d, 0x28, 0xb8, 0x4c, 0x1f, 0x1c, 0x69,
- 0x42, 0x5a, 0xcd, 0x09, 0xb2, 0xdb, 0xe7, 0x91, 0x71, 0x69, 0xe1, 0x30, 0x0c, 0x6e, 0xb8, 0x46,
- 0xbb, 0x50, 0xe0, 0x66, 0x68, 0xe4, 0x98, 0x50, 0xe6, 0xaa, 0xf9, 0x29, 0x1d, 0x6f, 0x8e, 0x49,
- 0x43, 0x20, 0x30, 0xb0, 0x70, 0x8d, 0x9e, 0x42, 0x7a, 0x64, 0xd2, 0x23, 0x57, 0x85, 0x8b, 0xd5,
- 0xe1, 0x34, 0x7b, 0x5c, 0x18, 0x4b, 0x0c, 0x7a, 0x0c, 0x19, 0x1e, 0x3e, 0x9e, 0xab, 0x16, 0x04,
- 0xfa, 0x9f, 0xe7, 0x1b, 0xc3, 0x3c, 0x17, 0xfb, 0x00, 0x54, 0x87, 0x5c, 0x90, 0x4c, 0x6a, 0x59,
- 0x80, 0xff, 0x75, 0x16, 0x1c, 0xa6, 0xdb, 0xf1, 0x7a, 0x0d, 0xfb, 0x6b, 0x1c, 0xe2, 0xd0, 0x27,
- 0xf0, 0x37, 0x57, 0x1f, 0x13, 0xcd, 0x76, 0xac, 0x01, 0x71, 0x5d, 0x4d, 0x77, 0xb5, 0x58, 0x10,
- 0xab, 0xc5, 0xf7, 0xb8, 0xb9, 0x6e, 0x59, 0xa3, 0x7d, 0x7d, 0xe4, 0x11, 0x7c, 0x8d, 0xc3, 0x3b,
- 0x12, 0xbd, 0xe9, 0x76, 0xc2, 0x50, 0x47, 0x3b, 0x50, 0x1e, 0x1c, 0x9a, 0x23, 0x43, 0x66, 0xc3,
- 0xc0, 0xf2, 0x28, 0x53, 0xe7, 0x05, 0xdd, 0xf5, 0x33, 0x74, 0x6f, 0x5a, 0x94, 0xdd, 0xdf, 0x90,
- 0x84, 0x25, 0x81, 0xe2, 0x14, 0x5b, 0x1c, 0x53, 0xf9, 0x56, 0x01, 0x88, 0x32, 0x0e, 0xbd, 0x84,
- 0x2c, 0xa1, 0xcc, 0x31, 0x89, 0xab, 0x2a, 0xcb, 0xc9, 0x95, 0xc2, 0xc6, 0xbd, 0xe9, 0xd3, 0xb5,
- 0xd6, 0xa0, 0xcc, 0x39, 0xc1, 0x01, 0x41, 0x65, 0x0d, 0xd2, 0x62, 0x07, 0x95, 0x21, 0x79, 0x44,
- 0x4e, 0x44, 0xd5, 0xc8, 0x63, 0xbe, 0x44, 0x8b, 0x90, 0x3e, 0xe6, 0xea, 0x88, 0x7a, 0x91, 0xc7,
- 0xf2, 0xa3, 0xf2, 0x43, 0x02, 0x20, 0x8a, 0x4c, 0xa4, 0xc3, 0x7c, 0x18, 0x9b, 0xda, 0x58, 0xb7,
- 0x7d, 0x8d, 0x9e, 0x4d, 0x1f, 0xdc, 0xd1, 0xf2, 0x95, 0x6e, 0x4b, 0xed, 0x8a, 0x7a, 0x6c, 0x0b,
- 0x3d, 0x02, 0xd5, 0x70, 0x2c, 0xdb, 0x26, 0x86, 0x16, 0xa5, 0x81, 0xff, 0x9a, 0x5c, 0xb5, 0x34,
- 0xbe, 0xea, 0x9f, 0x47, 0xa4, 0xf2, 0xdd, 0xbe, 0x84, 0x85, 0x33, 0xe4, 0xef, 0x30, 0xf4, 0x45,
- 0xdc, 0xd0, 0xc2, 0xc6, 0x9d, 0x73, 0x74, 0x0f, 0xe9, 0xa4, 0xa3, 0x24, 0xee, 0x49, 0xe2, 0x91,
- 0x52, 0xf9, 0x29, 0x0d, 0xf9, 0x30, 0x39, 0x50, 0x0d, 0x52, 0xa2, 0x46, 0x28, 0x17, 0xd6, 0x08,
- 0x21, 0x87, 0xf6, 0x01, 0x74, 0x4a, 0x2d, 0xa6, 0x33, 0xd3, 0xa2, 0xbe, 0x1e, 0x0f, 0xa6, 0xce,
- 0xc5, 0xda, 0x66, 0x88, 0x6d, 0xce, 0xe1, 0x18, 0x13, 0xfa, 0x02, 0xe6, 0xc7, 0xc4, 0x75, 0xf5,
- 0xa1, 0x9f, 0xe7, 0xa2, 0x1e, 0x17, 0x36, 0x1e, 0x4e, 0x4f, 0xfd, 0x4a, 0xc2, 0xc5, 0x47, 0x73,
- 0x0e, 0x17, 0xc7, 0xb1, 0xef, 0xca, 0xcf, 0x0a, 0x40, 0x74, 0x37, 0x6a, 0x43, 0xc1, 0x20, 0xee,
- 0xc0, 0x31, 0x6d, 0x61, 0x86, 0x32, 0x43, 0x7d, 0x8f, 0x13, 0x9c, 0x2a, 0x9b, 0x89, 0x0f, 0x29,
- 0x9b, 0x95, 0x3f, 0x14, 0x28, 0xc6, 0x6d, 0x41, 0x1f, 0x43, 0x8a, 0x9d, 0xd8, 0xd2, 0x45, 0xa5,
- 0x8d, 0xa7, 0xb3, 0xbd, 0x48, 0xad, 0x77, 0x62, 0x13, 0x2c, 0x88, 0x50, 0x09, 0x12, 0x7e, 0x73,
- 0x4d, 0xe1, 0x84, 0x69, 0xa0, 0xbb, 0xb0, 0xe0, 0xd1, 0x81, 0x35, 0xb6, 0x1d, 0xe2, 0xba, 0xc4,
- 0xd0, 0x5c, 0xf3, 0x2d, 0x11, 0xef, 0x9f, 0xc2, 0xe5, 0xf8, 0x41, 0xd7, 0x7c, 0x4b, 0xd0, 0xbf,
- 0xe1, 0xd2, 0x69, 0xd1, 0x94, 0x10, 0x2d, 0x4d, 0x0a, 0x56, 0x1f, 0x40, 0x8a, 0xdf, 0x89, 0x16,
- 0xa1, 0xdc, 0xfb, 0xb4, 0xd3, 0xd0, 0xde, 0xb4, 0xbb, 0x9d, 0xc6, 0x56, 0x6b, 0xa7, 0xd5, 0xd8,
- 0x2e, 0xcf, 0xa1, 0x1c, 0xa4, 0xba, 0x8d, 0x76, 0xaf, 0xac, 0xa0, 0x22, 0xe4, 0x70, 0x63, 0xab,
- 0xd1, 0xda, 0x6f, 0x6c, 0x97, 0x13, 0xf5, 0xac, 0x1f, 0xe2, 0x95, 0xdf, 0x78, 0x29, 0x89, 0xea,
- 0x76, 0x13, 0x20, 0x6a, 0x02, 0x7e, 0xee, 0xde, 0x99, 0xfa, 0x29, 0x70, 0x3e, 0x6c, 0x01, 0xe8,
- 0x09, 0x2c, 0x85, 0x59, 0x1a, 0x46, 0xc4, 0x64, 0x9a, 0x5e, 0x0b, 0xd2, 0x34, 0x3a, 0x17, 0x79,
- 0x8a, 0x5e, 0xc0, 0xf5, 0x00, 0x3b, 0x11, 0xad, 0x01, 0x3c, 0x29, 0xe0, 0x01, 0x7f, 0xfc, 0xfd,
- 0xfd, 0x44, 0xff, 0x3e, 0x01, 0x29, 0xde, 0x52, 0x66, 0x1a, 0x80, 0x9e, 0xfb, 0x81, 0x90, 0x14,
- 0x81, 0x70, 0x67, 0x9a, 0xd6, 0x15, 0x77, 0xfb, 0x64, 0x90, 0xa6, 0x3e, 0x24, 0x48, 0xab, 0xbb,
- 0xe7, 0x3a, 0xf7, 0x0a, 0x2c, 0x6c, 0x35, 0x5b, 0x7b, 0xdb, 0xda, 0x5e, 0xab, 0xbd, 0xdb, 0xd8,
- 0xd6, 0xba, 0x9d, 0xcd, 0x76, 0x59, 0x41, 0x57, 0x01, 0x75, 0x36, 0x71, 0xa3, 0xdd, 0x9b, 0xd8,
- 0x4f, 0x54, 0xbe, 0x82, 0xb4, 0x68, 0xb3, 0xe8, 0x11, 0xa4, 0x78, 0xa3, 0xf5, 0xdd, 0x7b, 0x6b,
- 0x1a, 0x03, 0xb1, 0x40, 0xa0, 0x1a, 0x5c, 0x0e, 0x1c, 0x23, 0x5a, 0xf5, 0x84, 0x3b, 0x17, 0xfc,
- 0x23, 0x71, 0x89, 0xf0, 0x43, 0xf5, 0x39, 0xe4, 0x82, 0x59, 0x0b, 0x2d, 0xc1, 0x15, 0xae, 0x88,
- 0xb6, 0xdb, 0x6a, 0x6f, 0x9f, 0x32, 0x04, 0x20, 0xd3, 0x6d, 0xe0, 0xfd, 0x06, 0x2e, 0x2b, 0x7c,
- 0xbd, 0xb5, 0xd7, 0xe2, 0x31, 0x9b, 0xa8, 0x3e, 0x84, 0x8c, 0xec, 0xef, 0x08, 0x41, 0x6a, 0x60,
- 0x19, 0x32, 0x39, 0xd3, 0x58, 0xac, 0x91, 0x0a, 0x59, 0x3f, 0x3a, 0xfc, 0x8e, 0x14, 0x7c, 0x56,
- 0x7f, 0x55, 0xa0, 0x34, 0x59, 0x99, 0xd1, 0x6b, 0x28, 0xba, 0xa2, 0xa2, 0x68, 0xb2, 0xb4, 0xcf,
- 0x50, 0x8b, 0x9a, 0x73, 0xb8, 0x20, 0x39, 0x24, 0xe5, 0xdf, 0x21, 0x6f, 0x52, 0xa6, 0x45, 0xad,
- 0x22, 0xd9, 0x9c, 0xc3, 0x39, 0x93, 0x32, 0x79, 0x7c, 0x03, 0xa0, 0x6f, 0x59, 0x23, 0xff, 0x9c,
- 0x07, 0x53, 0xae, 0x39, 0x87, 0xf3, 0xfd, 0x60, 0x4c, 0x40, 0x37, 0xa1, 0x68, 0x58, 0x5e, 0x7f,
- 0x44, 0x7c, 0x11, 0x1e, 0x2a, 0x0a, 0xbf, 0x44, 0xee, 0x0a, 0xa1, 0x30, 0x51, 0xab, 0xdf, 0x65,
- 0x00, 0xa2, 0xc9, 0x0d, 0xf5, 0xb8, 0x3d, 0x7c, 0xea, 0x3b, 0x70, 0xf4, 0xb1, 0x68, 0xfc, 0xdc,
- 0x9e, 0xf5, 0xa9, 0xc6, 0x3e, 0xb9, 0xdc, 0x11, 0x40, 0x2c, 0x87, 0x47, 0xf9, 0x81, 0x56, 0xe1,
- 0x72, 0x6c, 0x96, 0xd4, 0x0e, 0x75, 0xf7, 0x50, 0x0b, 0x6b, 0x58, 0x39, 0x1a, 0x16, 0x9b, 0xba,
- 0x7b, 0xd8, 0x32, 0x2a, 0xbf, 0x27, 0x7d, 0x9d, 0x04, 0x1c, 0xbd, 0x86, 0xf9, 0x03, 0x8f, 0x0e,
- 0x78, 0x22, 0x6b, 0x62, 0xa0, 0x9f, 0xa5, 0xe0, 0x17, 0x03, 0x8a, 0x36, 0xa7, 0xec, 0xc3, 0x55,
- 0xcb, 0x31, 0x87, 0x26, 0xd5, 0x47, 0xda, 0x24, 0x77, 0x62, 0x06, 0xee, 0xc5, 0x80, 0x6b, 0x27,
- 0x7e, 0x47, 0x0b, 0xf2, 0x07, 0xe6, 0x88, 0x48, 0xda, 0xe4, 0x0c, 0xb4, 0x39, 0x0e, 0x17, 0x54,
- 0x37, 0xa0, 0x30, 0x32, 0x29, 0xd1, 0xa8, 0x37, 0xee, 0x13, 0x47, 0x78, 0x34, 0x89, 0x81, 0x6f,
- 0xb5, 0xc5, 0x0e, 0xba, 0x09, 0xf3, 0x03, 0x6b, 0xe4, 0x8d, 0x69, 0x20, 0x92, 0x16, 0x22, 0x45,
- 0xb9, 0xe9, 0x0b, 0xd5, 0xa1, 0x30, 0xb2, 0x74, 0x43, 0x1b, 0x5b, 0x86, 0x37, 0x0a, 0xfe, 0xaf,
- 0x38, 0x6f, 0x08, 0x7e, 0x25, 0x04, 0x31, 0x70, 0x94, 0x5c, 0xa3, 0x2e, 0x94, 0xe4, 0x38, 0xab,
- 0x1d, 0x13, 0xc7, 0xe5, 0xdd, 0x37, 0x3b, 0x83, 0x65, 0xf3, 0x92, 0x63, 0x5f, 0x52, 0x54, 0xbe,
- 0x51, 0xa0, 0x10, 0x8b, 0x1d, 0xb4, 0x03, 0x69, 0x11, 0x7e, 0xd3, 0x8c, 0x9d, 0xef, 0x8a, 0x3e,
- 0x2c, 0xe1, 0xe8, 0x1e, 0x2c, 0x06, 0x65, 0x45, 0x86, 0xf3, 0x44, 0x5d, 0x41, 0xfe, 0x99, 0xbc,
- 0x54, 0x16, 0x96, 0x1f, 0x15, 0xc8, 0xf8, 0x96, 0x6e, 0x43, 0xc6, 0x7f, 0xa8, 0x59, 0xc2, 0xcd,
- 0xc7, 0xa2, 0x8f, 0x20, 0xd7, 0xf7, 0xf8, 0x68, 0xee, 0x87, 0xfb, 0x5f, 0xe5, 0xc9, 0x0a, 0x74,
- 0xcb, 0xa8, 0x7e, 0x0e, 0x0b, 0x67, 0x4e, 0xa3, 0xd1, 0x59, 0x89, 0x8d, 0xce, 0xdc, 0x6c, 0x26,
- 0x45, 0x89, 0xa1, 0xf5, 0x4f, 0x18, 0x99, 0x34, 0x3b, 0x3c, 0xab, 0x9f, 0x30, 0x22, 0xcc, 0xae,
- 0xdb, 0x70, 0xdd, 0xb4, 0xde, 0xaf, 0x57, 0x5d, 0xfe, 0x57, 0xd0, 0xe1, 0x9b, 0x1d, 0xe5, 0xb3,
- 0xfa, 0xd0, 0x64, 0x87, 0x5e, 0xbf, 0x36, 0xb0, 0xc6, 0x6b, 0x52, 0x7e, 0xd5, 0xa4, 0x2e, 0x73,
- 0xbc, 0x31, 0xa1, 0xb2, 0xdf, 0xae, 0x45, 0x54, 0xab, 0xf2, 0x67, 0x89, 0x21, 0xa1, 0xab, 0xc3,
- 0xe8, 0xf7, 0x8d, 0x7e, 0x46, 0x6c, 0xdf, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe0, 0x94,
- 0x45, 0x03, 0x11, 0x00, 0x00,
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
deleted file mode 100644
index 2ac2d28c4..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/trace/v1/trace_config.proto
-
-package v1
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// How spans should be sampled:
-// - Always off
-// - Always on
-// - Always follow the parent Span's decision (off if no parent).
-type ConstantSampler_ConstantDecision int32
-
-const (
- ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0
- ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1
- ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2
-)
-
-var ConstantSampler_ConstantDecision_name = map[int32]string{
- 0: "ALWAYS_OFF",
- 1: "ALWAYS_ON",
- 2: "ALWAYS_PARENT",
-}
-
-var ConstantSampler_ConstantDecision_value = map[string]int32{
- "ALWAYS_OFF": 0,
- "ALWAYS_ON": 1,
- "ALWAYS_PARENT": 2,
-}
-
-func (x ConstantSampler_ConstantDecision) String() string {
- return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x))
-}
-
-func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{2, 0}
-}
-
-// Global configuration of the trace service. All fields must be specified, or
-// the default (zero) values will be used for each type.
-type TraceConfig struct {
- // The global default sampler used to make decisions on span sampling.
- //
- // Types that are valid to be assigned to Sampler:
- // *TraceConfig_ProbabilitySampler
- // *TraceConfig_ConstantSampler
- // *TraceConfig_RateLimitingSampler
- Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"`
- // The global default max number of attributes per span.
- MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"`
- // The global default max number of annotation events per span.
- MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"`
- // The global default max number of message events per span.
- MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"`
- // The global default max number of link entries per span.
- MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TraceConfig) Reset() { *m = TraceConfig{} }
-func (m *TraceConfig) String() string { return proto.CompactTextString(m) }
-func (*TraceConfig) ProtoMessage() {}
-func (*TraceConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{0}
-}
-
-func (m *TraceConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TraceConfig.Unmarshal(m, b)
-}
-func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic)
-}
-func (m *TraceConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TraceConfig.Merge(m, src)
-}
-func (m *TraceConfig) XXX_Size() int {
- return xxx_messageInfo_TraceConfig.Size(m)
-}
-func (m *TraceConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_TraceConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TraceConfig proto.InternalMessageInfo
-
-type isTraceConfig_Sampler interface {
- isTraceConfig_Sampler()
-}
-
-type TraceConfig_ProbabilitySampler struct {
- ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"`
-}
-
-type TraceConfig_ConstantSampler struct {
- ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"`
-}
-
-type TraceConfig_RateLimitingSampler struct {
- RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"`
-}
-
-func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {}
-
-func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {}
-
-func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {}
-
-func (m *TraceConfig) GetSampler() isTraceConfig_Sampler {
- if m != nil {
- return m.Sampler
- }
- return nil
-}
-
-func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler {
- if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok {
- return x.ProbabilitySampler
- }
- return nil
-}
-
-func (m *TraceConfig) GetConstantSampler() *ConstantSampler {
- if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok {
- return x.ConstantSampler
- }
- return nil
-}
-
-func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler {
- if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok {
- return x.RateLimitingSampler
- }
- return nil
-}
-
-func (m *TraceConfig) GetMaxNumberOfAttributes() int64 {
- if m != nil {
- return m.MaxNumberOfAttributes
- }
- return 0
-}
-
-func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 {
- if m != nil {
- return m.MaxNumberOfAnnotations
- }
- return 0
-}
-
-func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 {
- if m != nil {
- return m.MaxNumberOfMessageEvents
- }
- return 0
-}
-
-func (m *TraceConfig) GetMaxNumberOfLinks() int64 {
- if m != nil {
- return m.MaxNumberOfLinks
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*TraceConfig) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*TraceConfig_ProbabilitySampler)(nil),
- (*TraceConfig_ConstantSampler)(nil),
- (*TraceConfig_RateLimitingSampler)(nil),
- }
-}
-
-// Sampler that tries to uniformly sample traces with a given probability.
-// The probability of sampling a trace is equal to that of the specified probability.
-type ProbabilitySampler struct {
- // The desired probability of sampling. Must be within [0.0, 1.0].
- SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} }
-func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) }
-func (*ProbabilitySampler) ProtoMessage() {}
-func (*ProbabilitySampler) Descriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{1}
-}
-
-func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b)
-}
-func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic)
-}
-func (m *ProbabilitySampler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProbabilitySampler.Merge(m, src)
-}
-func (m *ProbabilitySampler) XXX_Size() int {
- return xxx_messageInfo_ProbabilitySampler.Size(m)
-}
-func (m *ProbabilitySampler) XXX_DiscardUnknown() {
- xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo
-
-func (m *ProbabilitySampler) GetSamplingProbability() float64 {
- if m != nil {
- return m.SamplingProbability
- }
- return 0
-}
-
-// Sampler that always makes a constant decision on span sampling.
-type ConstantSampler struct {
- Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ConstantSampler) Reset() { *m = ConstantSampler{} }
-func (m *ConstantSampler) String() string { return proto.CompactTextString(m) }
-func (*ConstantSampler) ProtoMessage() {}
-func (*ConstantSampler) Descriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{2}
-}
-
-func (m *ConstantSampler) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ConstantSampler.Unmarshal(m, b)
-}
-func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic)
-}
-func (m *ConstantSampler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConstantSampler.Merge(m, src)
-}
-func (m *ConstantSampler) XXX_Size() int {
- return xxx_messageInfo_ConstantSampler.Size(m)
-}
-func (m *ConstantSampler) XXX_DiscardUnknown() {
- xxx_messageInfo_ConstantSampler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo
-
-func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision {
- if m != nil {
- return m.Decision
- }
- return ConstantSampler_ALWAYS_OFF
-}
-
-// Sampler that tries to sample with a rate per time window.
-type RateLimitingSampler struct {
- // Rate per second.
- Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} }
-func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) }
-func (*RateLimitingSampler) ProtoMessage() {}
-func (*RateLimitingSampler) Descriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{3}
-}
-
-func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b)
-}
-func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic)
-}
-func (m *RateLimitingSampler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RateLimitingSampler.Merge(m, src)
-}
-func (m *RateLimitingSampler) XXX_Size() int {
- return xxx_messageInfo_RateLimitingSampler.Size(m)
-}
-func (m *RateLimitingSampler) XXX_DiscardUnknown() {
- xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo
-
-func (m *RateLimitingSampler) GetQps() int64 {
- if m != nil {
- return m.Qps
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value)
- proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig")
- proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler")
- proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler")
- proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5)
-}
-
-var fileDescriptor_5359209b41ff50c5 = []byte{
- // 486 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x4e, 0xdb, 0x40,
- 0x10, 0x86, 0x31, 0xa1, 0x50, 0x06, 0x01, 0xee, 0x5a, 0x54, 0x46, 0xe2, 0x80, 0x7c, 0x29, 0xaa,
- 0x6a, 0xbb, 0xd0, 0x43, 0x55, 0x55, 0xaa, 0x94, 0x00, 0x51, 0x0f, 0x69, 0x88, 0x0c, 0x52, 0xd4,
- 0x5e, 0xdc, 0xb5, 0xd9, 0xb8, 0xab, 0xc6, 0xb3, 0xae, 0x77, 0x1d, 0xd1, 0x77, 0xe9, 0x43, 0xf4,
- 0x11, 0xab, 0xac, 0x5d, 0xdb, 0x49, 0x00, 0x71, 0xdb, 0xf9, 0xff, 0xf9, 0x7e, 0xaf, 0xbc, 0x33,
- 0xf0, 0x46, 0x64, 0x0c, 0x63, 0x86, 0xb2, 0x90, 0x7e, 0x96, 0x0b, 0x25, 0x7c, 0x95, 0xd3, 0x98,
- 0xf9, 0xb3, 0xd3, 0xf2, 0x10, 0xc6, 0x02, 0x27, 0x3c, 0xf1, 0xb4, 0x47, 0x0e, 0x9b, 0xee, 0x52,
- 0xf1, 0x74, 0x93, 0x37, 0x3b, 0x75, 0xfe, 0x6c, 0xc0, 0xce, 0xcd, 0xbc, 0x38, 0xd7, 0x00, 0xf9,
- 0x0e, 0x56, 0x96, 0x8b, 0x88, 0x46, 0x7c, 0xca, 0xd5, 0xef, 0x50, 0xd2, 0x34, 0x9b, 0xb2, 0xdc,
- 0x36, 0x8e, 0x8d, 0x93, 0x9d, 0x33, 0xd7, 0x7b, 0x30, 0xc8, 0x1b, 0x35, 0xd4, 0x75, 0x09, 0x7d,
- 0x5e, 0x0b, 0x48, 0xb6, 0xa2, 0x92, 0x31, 0x98, 0xb1, 0x40, 0xa9, 0x28, 0xaa, 0x3a, 0x7e, 0x5d,
- 0xc7, 0xbf, 0x7e, 0x24, 0xfe, 0xbc, 0x42, 0x9a, 0xec, 0xfd, 0x78, 0x51, 0x22, 0xb7, 0x70, 0x90,
- 0x53, 0xc5, 0xc2, 0x29, 0x4f, 0xb9, 0xe2, 0x98, 0xd4, 0xe9, 0x1d, 0x9d, 0xee, 0x3d, 0x92, 0x1e,
- 0x50, 0xc5, 0x06, 0x15, 0xd6, 0x7c, 0xc1, 0xca, 0x57, 0x65, 0xf2, 0x1e, 0xec, 0x94, 0xde, 0x85,
- 0x58, 0xa4, 0x11, 0xcb, 0x43, 0x31, 0x09, 0xa9, 0x52, 0x39, 0x8f, 0x0a, 0xc5, 0xa4, 0xbd, 0x71,
- 0x6c, 0x9c, 0x74, 0x82, 0x83, 0x94, 0xde, 0x0d, 0xb5, 0x7d, 0x35, 0xe9, 0xd6, 0x26, 0xf9, 0x00,
- 0x87, 0x4b, 0x20, 0xa2, 0x50, 0x54, 0x71, 0x81, 0xd2, 0x7e, 0xa6, 0xc9, 0x97, 0x6d, 0xb2, 0x71,
- 0xc9, 0x27, 0x38, 0x5a, 0x44, 0x53, 0x26, 0x25, 0x4d, 0x58, 0xc8, 0x66, 0x0c, 0x95, 0xb4, 0x37,
- 0x35, 0x6d, 0xb7, 0xe8, 0x2f, 0x65, 0xc3, 0xa5, 0xf6, 0x89, 0x0b, 0xd6, 0x22, 0x3f, 0xe5, 0xf8,
- 0x53, 0xda, 0x5b, 0x1a, 0x33, 0x5b, 0xd8, 0x60, 0xae, 0xf7, 0xb6, 0x61, 0xab, 0xfa, 0x75, 0x4e,
- 0x1f, 0xc8, 0xea, 0xc3, 0x92, 0xb7, 0x60, 0xe9, 0x06, 0x8e, 0x49, 0xcb, 0xd5, 0x43, 0x62, 0x04,
- 0xf7, 0x59, 0xce, 0x5f, 0x03, 0xf6, 0x97, 0x9e, 0x90, 0x8c, 0xe1, 0xf9, 0x2d, 0x8b, 0xb9, 0xe4,
- 0x02, 0x35, 0xba, 0x77, 0xf6, 0xf1, 0xe9, 0x03, 0x50, 0xd7, 0x17, 0x55, 0x44, 0x50, 0x87, 0x39,
- 0x17, 0x60, 0x2e, 0xbb, 0x64, 0x0f, 0xa0, 0x3b, 0x18, 0x77, 0xbf, 0x5e, 0x87, 0x57, 0xfd, 0xbe,
- 0xb9, 0x46, 0x76, 0x61, 0xfb, 0x7f, 0x3d, 0x34, 0x0d, 0xf2, 0x02, 0x76, 0xab, 0x72, 0xd4, 0x0d,
- 0x2e, 0x87, 0x37, 0xe6, 0xba, 0xf3, 0x0a, 0xac, 0x7b, 0xc6, 0x82, 0x98, 0xd0, 0xf9, 0x95, 0x49,
- 0x7d, 0xe1, 0x4e, 0x30, 0x3f, 0xf6, 0x66, 0x70, 0xc4, 0xc5, 0xc3, 0x37, 0xef, 0x99, 0xad, 0xfd,
- 0x1a, 0xcd, 0xad, 0x91, 0xf1, 0xad, 0x97, 0x70, 0xf5, 0xa3, 0x88, 0xbc, 0x58, 0xa4, 0x7e, 0x49,
- 0xb9, 0x1c, 0xa5, 0xca, 0x8b, 0x94, 0x61, 0xf9, 0xea, 0x7e, 0x13, 0xe8, 0x96, 0x1b, 0x9e, 0x30,
- 0x74, 0x93, 0x66, 0xd1, 0xa3, 0x4d, 0x2d, 0xbf, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x13, 0xe2,
- 0xd9, 0x56, 0x0c, 0x04, 0x00, 0x00,
-}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
deleted file mode 100644
index ada2b78e8..000000000
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
+++ /dev/null
@@ -1,1271 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2015 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
-It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
-
-This package produces a different output than the standard "encoding/json" package,
-which does not operate correctly on protocol buffers.
-*/
-package jsonpb
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
-
- stpb "github.com/golang/protobuf/ptypes/struct"
-)
-
-const secondInNanos = int64(time.Second / time.Nanosecond)
-
-// Marshaler is a configurable object for converting between
-// protocol buffer objects and a JSON representation for them.
-type Marshaler struct {
- // Whether to render enum values as integers, as opposed to string values.
- EnumsAsInts bool
-
- // Whether to render fields with zero values.
- EmitDefaults bool
-
- // A string to indent each level by. The presence of this field will
- // also cause a space to appear between the field separator and
- // value, and for newlines to be appear between fields and array
- // elements.
- Indent string
-
- // Whether to use the original (.proto) name for fields.
- OrigName bool
-
- // A custom URL resolver to use when marshaling Any messages to JSON.
- // If unset, the default resolution strategy is to extract the
- // fully-qualified type name from the type URL and pass that to
- // proto.MessageType(string).
- AnyResolver AnyResolver
-}
-
-// AnyResolver takes a type URL, present in an Any message, and resolves it into
-// an instance of the associated message.
-type AnyResolver interface {
- Resolve(typeUrl string) (proto.Message, error)
-}
-
-func defaultResolveAny(typeUrl string) (proto.Message, error) {
- // Only the part of typeUrl after the last slash is relevant.
- mname := typeUrl
- if slash := strings.LastIndex(mname, "/"); slash >= 0 {
- mname = mname[slash+1:]
- }
- mt := proto.MessageType(mname)
- if mt == nil {
- return nil, fmt.Errorf("unknown message type %q", mname)
- }
- return reflect.New(mt.Elem()).Interface().(proto.Message), nil
-}
-
-// JSONPBMarshaler is implemented by protobuf messages that customize the
-// way they are marshaled to JSON. Messages that implement this should
-// also implement JSONPBUnmarshaler so that the custom format can be
-// parsed.
-//
-// The JSON marshaling must follow the proto to JSON specification:
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-type JSONPBMarshaler interface {
- MarshalJSONPB(*Marshaler) ([]byte, error)
-}
-
-// JSONPBUnmarshaler is implemented by protobuf messages that customize
-// the way they are unmarshaled from JSON. Messages that implement this
-// should also implement JSONPBMarshaler so that the custom format can be
-// produced.
-//
-// The JSON unmarshaling must follow the JSON to proto specification:
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-type JSONPBUnmarshaler interface {
- UnmarshalJSONPB(*Unmarshaler, []byte) error
-}
-
-// Marshal marshals a protocol buffer into JSON.
-func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
- v := reflect.ValueOf(pb)
- if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
- return errors.New("Marshal called with nil")
- }
- // Check for unset required fields first.
- if err := checkRequiredFields(pb); err != nil {
- return err
- }
- writer := &errWriter{writer: out}
- return m.marshalObject(writer, pb, "", "")
-}
-
-// MarshalToString converts a protocol buffer object to JSON string.
-func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
- var buf bytes.Buffer
- if err := m.Marshal(&buf, pb); err != nil {
- return "", err
- }
- return buf.String(), nil
-}
-
-type int32Slice []int32
-
-var nonFinite = map[string]float64{
- `"NaN"`: math.NaN(),
- `"Infinity"`: math.Inf(1),
- `"-Infinity"`: math.Inf(-1),
-}
-
-// For sorting extensions ids to ensure stable output.
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-type wkt interface {
- XXX_WellKnownType() string
-}
-
-// marshalObject writes a struct to the Writer.
-func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
- if jsm, ok := v.(JSONPBMarshaler); ok {
- b, err := jsm.MarshalJSONPB(m)
- if err != nil {
- return err
- }
- if typeURL != "" {
- // we are marshaling this object to an Any type
- var js map[string]*json.RawMessage
- if err = json.Unmarshal(b, &js); err != nil {
- return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
- }
- turl, err := json.Marshal(typeURL)
- if err != nil {
- return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
- }
- js["@type"] = (*json.RawMessage)(&turl)
- if b, err = json.Marshal(js); err != nil {
- return err
- }
- }
-
- out.write(string(b))
- return out.err
- }
-
- s := reflect.ValueOf(v).Elem()
-
- // Handle well-known types.
- if wkt, ok := v.(wkt); ok {
- switch wkt.XXX_WellKnownType() {
- case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
- "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
- // "Wrappers use the same representation in JSON
- // as the wrapped primitive type, ..."
- sprop := proto.GetProperties(s.Type())
- return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
- case "Any":
- // Any is a bit more involved.
- return m.marshalAny(out, v, indent)
- case "Duration":
- // "Generated output always contains 0, 3, 6, or 9 fractional digits,
- // depending on required precision."
- s, ns := s.Field(0).Int(), s.Field(1).Int()
- if ns <= -secondInNanos || ns >= secondInNanos {
- return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
- }
- if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
- return errors.New("signs of seconds and nanos do not match")
- }
- if s < 0 {
- ns = -ns
- }
- x := fmt.Sprintf("%d.%09d", s, ns)
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- out.write(`"`)
- out.write(x)
- out.write(`s"`)
- return out.err
- case "Struct", "ListValue":
- // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
- // TODO: pass the correct Properties if needed.
- return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
- case "Timestamp":
- // "RFC 3339, where generated output will always be Z-normalized
- // and uses 0, 3, 6 or 9 fractional digits."
- s, ns := s.Field(0).Int(), s.Field(1).Int()
- if ns < 0 || ns >= secondInNanos {
- return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
- }
- t := time.Unix(s, ns).UTC()
- // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
- x := t.Format("2006-01-02T15:04:05.000000000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- out.write(`"`)
- out.write(x)
- out.write(`Z"`)
- return out.err
- case "Value":
- // Value has a single oneof.
- kind := s.Field(0)
- if kind.IsNil() {
- // "absence of any variant indicates an error"
- return errors.New("nil Value")
- }
- // oneof -> *T -> T -> T.F
- x := kind.Elem().Elem().Field(0)
- // TODO: pass the correct Properties if needed.
- return m.marshalValue(out, &proto.Properties{}, x, indent)
- }
- }
-
- out.write("{")
- if m.Indent != "" {
- out.write("\n")
- }
-
- firstField := true
-
- if typeURL != "" {
- if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
- return err
- }
- firstField = false
- }
-
- for i := 0; i < s.NumField(); i++ {
- value := s.Field(i)
- valueField := s.Type().Field(i)
- if strings.HasPrefix(valueField.Name, "XXX_") {
- continue
- }
-
- // IsNil will panic on most value kinds.
- switch value.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface:
- if value.IsNil() {
- continue
- }
- }
-
- if !m.EmitDefaults {
- switch value.Kind() {
- case reflect.Bool:
- if !value.Bool() {
- continue
- }
- case reflect.Int32, reflect.Int64:
- if value.Int() == 0 {
- continue
- }
- case reflect.Uint32, reflect.Uint64:
- if value.Uint() == 0 {
- continue
- }
- case reflect.Float32, reflect.Float64:
- if value.Float() == 0 {
- continue
- }
- case reflect.String:
- if value.Len() == 0 {
- continue
- }
- case reflect.Map, reflect.Ptr, reflect.Slice:
- if value.IsNil() {
- continue
- }
- }
- }
-
- // Oneof fields need special handling.
- if valueField.Tag.Get("protobuf_oneof") != "" {
- // value is an interface containing &T{real_value}.
- sv := value.Elem().Elem() // interface -> *T -> T
- value = sv.Field(0)
- valueField = sv.Type().Field(0)
- }
- prop := jsonProperties(valueField, m.OrigName)
- if !firstField {
- m.writeSep(out)
- }
- if err := m.marshalField(out, prop, value, indent); err != nil {
- return err
- }
- firstField = false
- }
-
- // Handle proto2 extensions.
- if ep, ok := v.(proto.Message); ok {
- extensions := proto.RegisteredExtensions(v)
- // Sort extensions for stable output.
- ids := make([]int32, 0, len(extensions))
- for id, desc := range extensions {
- if !proto.HasExtension(ep, desc) {
- continue
- }
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- for _, id := range ids {
- desc := extensions[id]
- if desc == nil {
- // unknown extension
- continue
- }
- ext, extErr := proto.GetExtension(ep, desc)
- if extErr != nil {
- return extErr
- }
- value := reflect.ValueOf(ext)
- var prop proto.Properties
- prop.Parse(desc.Tag)
- prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
- if !firstField {
- m.writeSep(out)
- }
- if err := m.marshalField(out, &prop, value, indent); err != nil {
- return err
- }
- firstField = false
- }
-
- }
-
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- }
- out.write("}")
- return out.err
-}
-
-func (m *Marshaler) writeSep(out *errWriter) {
- if m.Indent != "" {
- out.write(",\n")
- } else {
- out.write(",")
- }
-}
-
-func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
- // "If the Any contains a value that has a special JSON mapping,
- // it will be converted as follows: {"@type": xxx, "value": yyy}.
- // Otherwise, the value will be converted into a JSON object,
- // and the "@type" field will be inserted to indicate the actual data type."
- v := reflect.ValueOf(any).Elem()
- turl := v.Field(0).String()
- val := v.Field(1).Bytes()
-
- var msg proto.Message
- var err error
- if m.AnyResolver != nil {
- msg, err = m.AnyResolver.Resolve(turl)
- } else {
- msg, err = defaultResolveAny(turl)
- }
- if err != nil {
- return err
- }
-
- if err := proto.Unmarshal(val, msg); err != nil {
- return err
- }
-
- if _, ok := msg.(wkt); ok {
- out.write("{")
- if m.Indent != "" {
- out.write("\n")
- }
- if err := m.marshalTypeURL(out, indent, turl); err != nil {
- return err
- }
- m.writeSep(out)
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- out.write(`"value": `)
- } else {
- out.write(`"value":`)
- }
- if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
- return err
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- }
- out.write("}")
- return out.err
- }
-
- return m.marshalObject(out, msg, indent, turl)
-}
-
-func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`"@type":`)
- if m.Indent != "" {
- out.write(" ")
- }
- b, err := json.Marshal(typeURL)
- if err != nil {
- return err
- }
- out.write(string(b))
- return out.err
-}
-
-// marshalField writes field description and value to the Writer.
-func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`"`)
- out.write(prop.JSONName)
- out.write(`":`)
- if m.Indent != "" {
- out.write(" ")
- }
- if err := m.marshalValue(out, prop, v, indent); err != nil {
- return err
- }
- return nil
-}
-
-// marshalValue writes the value to the Writer.
-func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
- var err error
- v = reflect.Indirect(v)
-
- // Handle nil pointer
- if v.Kind() == reflect.Invalid {
- out.write("null")
- return out.err
- }
-
- // Handle repeated elements.
- if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
- out.write("[")
- comma := ""
- for i := 0; i < v.Len(); i++ {
- sliceVal := v.Index(i)
- out.write(comma)
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- out.write(m.Indent)
- }
- if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
- return err
- }
- comma = ","
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- }
- out.write("]")
- return out.err
- }
-
- // Handle well-known types.
- // Most are handled up in marshalObject (because 99% are messages).
- if wkt, ok := v.Interface().(wkt); ok {
- switch wkt.XXX_WellKnownType() {
- case "NullValue":
- out.write("null")
- return out.err
- }
- }
-
- // Handle enumerations.
- if !m.EnumsAsInts && prop.Enum != "" {
- // Unknown enum values will are stringified by the proto library as their
- // value. Such values should _not_ be quoted or they will be interpreted
- // as an enum string instead of their value.
- enumStr := v.Interface().(fmt.Stringer).String()
- var valStr string
- if v.Kind() == reflect.Ptr {
- valStr = strconv.Itoa(int(v.Elem().Int()))
- } else {
- valStr = strconv.Itoa(int(v.Int()))
- }
- isKnownEnum := enumStr != valStr
- if isKnownEnum {
- out.write(`"`)
- }
- out.write(enumStr)
- if isKnownEnum {
- out.write(`"`)
- }
- return out.err
- }
-
- // Handle nested messages.
- if v.Kind() == reflect.Struct {
- return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
- }
-
- // Handle maps.
- // Since Go randomizes map iteration, we sort keys for stable output.
- if v.Kind() == reflect.Map {
- out.write(`{`)
- keys := v.MapKeys()
- sort.Sort(mapKeys(keys))
- for i, k := range keys {
- if i > 0 {
- out.write(`,`)
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- out.write(m.Indent)
- }
-
- // TODO handle map key prop properly
- b, err := json.Marshal(k.Interface())
- if err != nil {
- return err
- }
- s := string(b)
-
- // If the JSON is not a string value, encode it again to make it one.
- if !strings.HasPrefix(s, `"`) {
- b, err := json.Marshal(s)
- if err != nil {
- return err
- }
- s = string(b)
- }
-
- out.write(s)
- out.write(`:`)
- if m.Indent != "" {
- out.write(` `)
- }
-
- vprop := prop
- if prop != nil && prop.MapValProp != nil {
- vprop = prop.MapValProp
- }
- if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
- return err
- }
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`}`)
- return out.err
- }
-
- // Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- f := v.Float()
- var sval string
- switch {
- case math.IsInf(f, 1):
- sval = `"Infinity"`
- case math.IsInf(f, -1):
- sval = `"-Infinity"`
- case math.IsNaN(f):
- sval = `"NaN"`
- }
- if sval != "" {
- out.write(sval)
- return out.err
- }
- }
-
- // Default handling defers to the encoding/json library.
- b, err := json.Marshal(v.Interface())
- if err != nil {
- return err
- }
- needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
- if needToQuote {
- out.write(`"`)
- }
- out.write(string(b))
- if needToQuote {
- out.write(`"`)
- }
- return out.err
-}
-
-// Unmarshaler is a configurable object for converting from a JSON
-// representation to a protocol buffer object.
-type Unmarshaler struct {
- // Whether to allow messages to contain unknown fields, as opposed to
- // failing to unmarshal.
- AllowUnknownFields bool
-
- // A custom URL resolver to use when unmarshaling Any messages from JSON.
- // If unset, the default resolution strategy is to extract the
- // fully-qualified type name from the type URL and pass that to
- // proto.MessageType(string).
- AnyResolver AnyResolver
-}
-
-// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
-// This function is lenient and will decode any options permutations of the
-// related Marshaler.
-func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
- inputValue := json.RawMessage{}
- if err := dec.Decode(&inputValue); err != nil {
- return err
- }
- if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
- return err
- }
- return checkRequiredFields(pb)
-}
-
-// Unmarshal unmarshals a JSON object stream into a protocol
-// buffer. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
- dec := json.NewDecoder(r)
- return u.UnmarshalNext(dec, pb)
-}
-
-// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
-// This function is lenient and will decode any options permutations of the
-// related Marshaler.
-func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
- return new(Unmarshaler).UnmarshalNext(dec, pb)
-}
-
-// Unmarshal unmarshals a JSON object stream into a protocol
-// buffer. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func Unmarshal(r io.Reader, pb proto.Message) error {
- return new(Unmarshaler).Unmarshal(r, pb)
-}
-
-// UnmarshalString will populate the fields of a protocol buffer based
-// on a JSON string. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func UnmarshalString(str string, pb proto.Message) error {
- return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
-}
-
-// unmarshalValue converts/copies a value into the target.
-// prop may be nil.
-func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
- targetType := target.Type()
-
- // Allocate memory for pointer fields.
- if targetType.Kind() == reflect.Ptr {
- // If input value is "null" and target is a pointer type, then the field should be treated as not set
- // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
- _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
- if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
- return nil
- }
- target.Set(reflect.New(targetType.Elem()))
-
- return u.unmarshalValue(target.Elem(), inputValue, prop)
- }
-
- if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
- return jsu.UnmarshalJSONPB(u, []byte(inputValue))
- }
-
- // Handle well-known types that are not pointers.
- if w, ok := target.Addr().Interface().(wkt); ok {
- switch w.XXX_WellKnownType() {
- case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
- "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
- return u.unmarshalValue(target.Field(0), inputValue, prop)
- case "Any":
- // Use json.RawMessage pointer type instead of value to support pre-1.8 version.
- // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see
- // https://github.com/golang/go/issues/14493
- var jsonFields map[string]*json.RawMessage
- if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
- return err
- }
-
- val, ok := jsonFields["@type"]
- if !ok || val == nil {
- return errors.New("Any JSON doesn't have '@type'")
- }
-
- var turl string
- if err := json.Unmarshal([]byte(*val), &turl); err != nil {
- return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
- }
- target.Field(0).SetString(turl)
-
- var m proto.Message
- var err error
- if u.AnyResolver != nil {
- m, err = u.AnyResolver.Resolve(turl)
- } else {
- m, err = defaultResolveAny(turl)
- }
- if err != nil {
- return err
- }
-
- if _, ok := m.(wkt); ok {
- val, ok := jsonFields["value"]
- if !ok {
- return errors.New("Any JSON doesn't have 'value'")
- }
-
- if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
- return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
- }
- } else {
- delete(jsonFields, "@type")
- nestedProto, err := json.Marshal(jsonFields)
- if err != nil {
- return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
- }
-
- if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
- return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
- }
- }
-
- b, err := proto.Marshal(m)
- if err != nil {
- return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
- }
- target.Field(1).SetBytes(b)
-
- return nil
- case "Duration":
- unq, err := unquote(string(inputValue))
- if err != nil {
- return err
- }
-
- d, err := time.ParseDuration(unq)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
-
- ns := d.Nanoseconds()
- s := ns / 1e9
- ns %= 1e9
- target.Field(0).SetInt(s)
- target.Field(1).SetInt(ns)
- return nil
- case "Timestamp":
- unq, err := unquote(string(inputValue))
- if err != nil {
- return err
- }
-
- t, err := time.Parse(time.RFC3339Nano, unq)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
-
- target.Field(0).SetInt(t.Unix())
- target.Field(1).SetInt(int64(t.Nanosecond()))
- return nil
- case "Struct":
- var m map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &m); err != nil {
- return fmt.Errorf("bad StructValue: %v", err)
- }
-
- target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
- for k, jv := range m {
- pv := &stpb.Value{}
- if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
- return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
- }
- target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
- }
- return nil
- case "ListValue":
- var s []json.RawMessage
- if err := json.Unmarshal(inputValue, &s); err != nil {
- return fmt.Errorf("bad ListValue: %v", err)
- }
-
- target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
- for i, sv := range s {
- if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
- return err
- }
- }
- return nil
- case "Value":
- ivStr := string(inputValue)
- if ivStr == "null" {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
- } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
- } else if v, err := unquote(ivStr); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
- } else if v, err := strconv.ParseBool(ivStr); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
- } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
- lv := &stpb.ListValue{}
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
- return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
- } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
- sv := &stpb.Struct{}
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
- return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
- } else {
- return fmt.Errorf("unrecognized type for Value %q", ivStr)
- }
- return nil
- }
- }
-
- // Handle enums, which have an underlying type of int32,
- // and may appear as strings.
- // The case of an enum appearing as a number is handled
- // at the bottom of this function.
- if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
- vmap := proto.EnumValueMap(prop.Enum)
- // Don't need to do unquoting; valid enum names
- // are from a limited character set.
- s := inputValue[1 : len(inputValue)-1]
- n, ok := vmap[string(s)]
- if !ok {
- return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
- }
- if target.Kind() == reflect.Ptr { // proto2
- target.Set(reflect.New(targetType.Elem()))
- target = target.Elem()
- }
- if targetType.Kind() != reflect.Int32 {
- return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
- }
- target.SetInt(int64(n))
- return nil
- }
-
- // Handle nested messages.
- if targetType.Kind() == reflect.Struct {
- var jsonFields map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
- return err
- }
-
- consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
- // Be liberal in what names we accept; both orig_name and camelName are okay.
- fieldNames := acceptedJSONFieldNames(prop)
-
- vOrig, okOrig := jsonFields[fieldNames.orig]
- vCamel, okCamel := jsonFields[fieldNames.camel]
- if !okOrig && !okCamel {
- return nil, false
- }
- // If, for some reason, both are present in the data, favour the camelName.
- var raw json.RawMessage
- if okOrig {
- raw = vOrig
- delete(jsonFields, fieldNames.orig)
- }
- if okCamel {
- raw = vCamel
- delete(jsonFields, fieldNames.camel)
- }
- return raw, true
- }
-
- sprops := proto.GetProperties(targetType)
- for i := 0; i < target.NumField(); i++ {
- ft := target.Type().Field(i)
- if strings.HasPrefix(ft.Name, "XXX_") {
- continue
- }
-
- valueForField, ok := consumeField(sprops.Prop[i])
- if !ok {
- continue
- }
-
- if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
- return err
- }
- }
- // Check for any oneof fields.
- if len(jsonFields) > 0 {
- for _, oop := range sprops.OneofTypes {
- raw, ok := consumeField(oop.Prop)
- if !ok {
- continue
- }
- nv := reflect.New(oop.Type.Elem())
- target.Field(oop.Field).Set(nv)
- if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
- return err
- }
- }
- }
- // Handle proto2 extensions.
- if len(jsonFields) > 0 {
- if ep, ok := target.Addr().Interface().(proto.Message); ok {
- for _, ext := range proto.RegisteredExtensions(ep) {
- name := fmt.Sprintf("[%s]", ext.Name)
- raw, ok := jsonFields[name]
- if !ok {
- continue
- }
- delete(jsonFields, name)
- nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
- if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
- return err
- }
- if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
- return err
- }
- }
- }
- }
- if !u.AllowUnknownFields && len(jsonFields) > 0 {
- // Pick any field to be the scapegoat.
- var f string
- for fname := range jsonFields {
- f = fname
- break
- }
- return fmt.Errorf("unknown field %q in %v", f, targetType)
- }
- return nil
- }
-
- // Handle arrays (which aren't encoded bytes)
- if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
- var slc []json.RawMessage
- if err := json.Unmarshal(inputValue, &slc); err != nil {
- return err
- }
- if slc != nil {
- l := len(slc)
- target.Set(reflect.MakeSlice(targetType, l, l))
- for i := 0; i < l; i++ {
- if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
- return err
- }
- }
- }
- return nil
- }
-
- // Handle maps (whose keys are always strings)
- if targetType.Kind() == reflect.Map {
- var mp map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &mp); err != nil {
- return err
- }
- if mp != nil {
- target.Set(reflect.MakeMap(targetType))
- for ks, raw := range mp {
- // Unmarshal map key. The core json library already decoded the key into a
- // string, so we handle that specially. Other types were quoted post-serialization.
- var k reflect.Value
- if targetType.Key().Kind() == reflect.String {
- k = reflect.ValueOf(ks)
- } else {
- k = reflect.New(targetType.Key()).Elem()
- var kprop *proto.Properties
- if prop != nil && prop.MapKeyProp != nil {
- kprop = prop.MapKeyProp
- }
- if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
- return err
- }
- }
-
- // Unmarshal map value.
- v := reflect.New(targetType.Elem()).Elem()
- var vprop *proto.Properties
- if prop != nil && prop.MapValProp != nil {
- vprop = prop.MapValProp
- }
- if err := u.unmarshalValue(v, raw, vprop); err != nil {
- return err
- }
- target.SetMapIndex(k, v)
- }
- }
- return nil
- }
-
- // Non-finite numbers can be encoded as strings.
- isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
- if isFloat {
- if num, ok := nonFinite[string(inputValue)]; ok {
- target.SetFloat(num)
- return nil
- }
- }
-
- // integers & floats can be encoded as strings. In this case we drop
- // the quotes and proceed as normal.
- isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
- targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
- targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
- if isNum && strings.HasPrefix(string(inputValue), `"`) {
- inputValue = inputValue[1 : len(inputValue)-1]
- }
-
- // Use the encoding/json for parsing other value types.
- return json.Unmarshal(inputValue, target.Addr().Interface())
-}
-
-func unquote(s string) (string, error) {
- var ret string
- err := json.Unmarshal([]byte(s), &ret)
- return ret, err
-}
-
-// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
-func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
- var prop proto.Properties
- prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
- if origName || prop.JSONName == "" {
- prop.JSONName = prop.OrigName
- }
- return &prop
-}
-
-type fieldNames struct {
- orig, camel string
-}
-
-func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
- opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
- if prop.JSONName != "" {
- opts.camel = prop.JSONName
- }
- return opts
-}
-
-// Writer wrapper inspired by https://blog.golang.org/errors-are-values
-type errWriter struct {
- writer io.Writer
- err error
-}
-
-func (w *errWriter) write(str string) {
- if w.err != nil {
- return
- }
- _, w.err = w.writer.Write([]byte(str))
-}
-
-// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-//
-// Numeric keys are sorted in numeric order per
-// https://developers.google.com/protocol-buffers/docs/proto#maps.
-type mapKeys []reflect.Value
-
-func (s mapKeys) Len() int { return len(s) }
-func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s mapKeys) Less(i, j int) bool {
- if k := s[i].Kind(); k == s[j].Kind() {
- switch k {
- case reflect.String:
- return s[i].String() < s[j].String()
- case reflect.Int32, reflect.Int64:
- return s[i].Int() < s[j].Int()
- case reflect.Uint32, reflect.Uint64:
- return s[i].Uint() < s[j].Uint()
- }
- }
- return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
-}
-
-// checkRequiredFields returns an error if any required field in the given proto message is not set.
-// This function is used by both Marshal and Unmarshal. While required fields only exist in a
-// proto2 message, a proto3 message can contain proto2 message(s).
-func checkRequiredFields(pb proto.Message) error {
- // Most well-known type messages do not contain required fields. The "Any" type may contain
- // a message that has required fields.
- //
- // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value
- // field in order to transform that into JSON, and that should have returned an error if a
- // required field is not set in the embedded message.
- //
- // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
- // embedded message to store the serialized message in Any.Value field, and that should have
- // returned an error if a required field is not set.
- if _, ok := pb.(wkt); ok {
- return nil
- }
-
- v := reflect.ValueOf(pb)
- // Skip message if it is not a struct pointer.
- if v.Kind() != reflect.Ptr {
- return nil
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return nil
- }
-
- for i := 0; i < v.NumField(); i++ {
- field := v.Field(i)
- sfield := v.Type().Field(i)
-
- if sfield.PkgPath != "" {
- // blank PkgPath means the field is exported; skip if not exported
- continue
- }
-
- if strings.HasPrefix(sfield.Name, "XXX_") {
- continue
- }
-
- // Oneof field is an interface implemented by wrapper structs containing the actual oneof
- // field, i.e. an interface containing &T{real_value}.
- if sfield.Tag.Get("protobuf_oneof") != "" {
- if field.Kind() != reflect.Interface {
- continue
- }
- v := field.Elem()
- if v.Kind() != reflect.Ptr || v.IsNil() {
- continue
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct || v.NumField() < 1 {
- continue
- }
- field = v.Field(0)
- sfield = v.Type().Field(0)
- }
-
- protoTag := sfield.Tag.Get("protobuf")
- if protoTag == "" {
- continue
- }
- var prop proto.Properties
- prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
-
- switch field.Kind() {
- case reflect.Map:
- if field.IsNil() {
- continue
- }
- // Check each map value.
- keys := field.MapKeys()
- for _, k := range keys {
- v := field.MapIndex(k)
- if err := checkRequiredFieldsInValue(v); err != nil {
- return err
- }
- }
- case reflect.Slice:
- // Handle non-repeated type, e.g. bytes.
- if !prop.Repeated {
- if prop.Required && field.IsNil() {
- return fmt.Errorf("required field %q is not set", prop.Name)
- }
- continue
- }
-
- // Handle repeated type.
- if field.IsNil() {
- continue
- }
- // Check each slice item.
- for i := 0; i < field.Len(); i++ {
- v := field.Index(i)
- if err := checkRequiredFieldsInValue(v); err != nil {
- return err
- }
- }
- case reflect.Ptr:
- if field.IsNil() {
- if prop.Required {
- return fmt.Errorf("required field %q is not set", prop.Name)
- }
- continue
- }
- if err := checkRequiredFieldsInValue(field); err != nil {
- return err
- }
- }
- }
-
- // Handle proto2 extensions.
- for _, ext := range proto.RegisteredExtensions(pb) {
- if !proto.HasExtension(pb, ext) {
- continue
- }
- ep, err := proto.GetExtension(pb, ext)
- if err != nil {
- return err
- }
- err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func checkRequiredFieldsInValue(v reflect.Value) error {
- if pm, ok := v.Interface().(proto.Message); ok {
- return checkRequiredFields(pm)
- }
- return nil
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
deleted file mode 100644
index 1ded05bbe..000000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ /dev/null
@@ -1,2887 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/descriptor.proto
-
-package descriptor
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type FieldDescriptorProto_Type int32
-
-const (
- // 0 is reserved for errors.
- // Order is weird for historical reasons.
- FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
- FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2
- // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
- // negative values are likely.
- FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3
- FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
- // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
- // negative values are likely.
- FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5
- FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
- FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
- FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
- FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
- // Tag-delimited aggregate.
- // Group type is deprecated and not supported in proto3. However, Proto3
- // implementations should still be able to parse the group wire format and
- // treat group fields as unknown fields.
- FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
- FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
- // New in version 2.
- FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12
- FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13
- FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14
- FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
- FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
- FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17
- FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18
-)
-
-var FieldDescriptorProto_Type_name = map[int32]string{
- 1: "TYPE_DOUBLE",
- 2: "TYPE_FLOAT",
- 3: "TYPE_INT64",
- 4: "TYPE_UINT64",
- 5: "TYPE_INT32",
- 6: "TYPE_FIXED64",
- 7: "TYPE_FIXED32",
- 8: "TYPE_BOOL",
- 9: "TYPE_STRING",
- 10: "TYPE_GROUP",
- 11: "TYPE_MESSAGE",
- 12: "TYPE_BYTES",
- 13: "TYPE_UINT32",
- 14: "TYPE_ENUM",
- 15: "TYPE_SFIXED32",
- 16: "TYPE_SFIXED64",
- 17: "TYPE_SINT32",
- 18: "TYPE_SINT64",
-}
-
-var FieldDescriptorProto_Type_value = map[string]int32{
- "TYPE_DOUBLE": 1,
- "TYPE_FLOAT": 2,
- "TYPE_INT64": 3,
- "TYPE_UINT64": 4,
- "TYPE_INT32": 5,
- "TYPE_FIXED64": 6,
- "TYPE_FIXED32": 7,
- "TYPE_BOOL": 8,
- "TYPE_STRING": 9,
- "TYPE_GROUP": 10,
- "TYPE_MESSAGE": 11,
- "TYPE_BYTES": 12,
- "TYPE_UINT32": 13,
- "TYPE_ENUM": 14,
- "TYPE_SFIXED32": 15,
- "TYPE_SFIXED64": 16,
- "TYPE_SINT32": 17,
- "TYPE_SINT64": 18,
-}
-
-func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
- p := new(FieldDescriptorProto_Type)
- *p = x
- return p
-}
-
-func (x FieldDescriptorProto_Type) String() string {
- return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
-}
-
-func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
- if err != nil {
- return err
- }
- *x = FieldDescriptorProto_Type(value)
- return nil
-}
-
-func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{4, 0}
-}
-
-type FieldDescriptorProto_Label int32
-
-const (
- // 0 is reserved for errors
- FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
- FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
- FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
-)
-
-var FieldDescriptorProto_Label_name = map[int32]string{
- 1: "LABEL_OPTIONAL",
- 2: "LABEL_REQUIRED",
- 3: "LABEL_REPEATED",
-}
-
-var FieldDescriptorProto_Label_value = map[string]int32{
- "LABEL_OPTIONAL": 1,
- "LABEL_REQUIRED": 2,
- "LABEL_REPEATED": 3,
-}
-
-func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
- p := new(FieldDescriptorProto_Label)
- *p = x
- return p
-}
-
-func (x FieldDescriptorProto_Label) String() string {
- return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
-}
-
-func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
- if err != nil {
- return err
- }
- *x = FieldDescriptorProto_Label(value)
- return nil
-}
-
-func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{4, 1}
-}
-
-// Generated classes can be optimized for speed or code size.
-type FileOptions_OptimizeMode int32
-
-const (
- FileOptions_SPEED FileOptions_OptimizeMode = 1
- // etc.
- FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2
- FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
-)
-
-var FileOptions_OptimizeMode_name = map[int32]string{
- 1: "SPEED",
- 2: "CODE_SIZE",
- 3: "LITE_RUNTIME",
-}
-
-var FileOptions_OptimizeMode_value = map[string]int32{
- "SPEED": 1,
- "CODE_SIZE": 2,
- "LITE_RUNTIME": 3,
-}
-
-func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
- p := new(FileOptions_OptimizeMode)
- *p = x
- return p
-}
-
-func (x FileOptions_OptimizeMode) String() string {
- return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
-}
-
-func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
- if err != nil {
- return err
- }
- *x = FileOptions_OptimizeMode(value)
- return nil
-}
-
-func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{10, 0}
-}
-
-type FieldOptions_CType int32
-
-const (
- // Default mode.
- FieldOptions_STRING FieldOptions_CType = 0
- FieldOptions_CORD FieldOptions_CType = 1
- FieldOptions_STRING_PIECE FieldOptions_CType = 2
-)
-
-var FieldOptions_CType_name = map[int32]string{
- 0: "STRING",
- 1: "CORD",
- 2: "STRING_PIECE",
-}
-
-var FieldOptions_CType_value = map[string]int32{
- "STRING": 0,
- "CORD": 1,
- "STRING_PIECE": 2,
-}
-
-func (x FieldOptions_CType) Enum() *FieldOptions_CType {
- p := new(FieldOptions_CType)
- *p = x
- return p
-}
-
-func (x FieldOptions_CType) String() string {
- return proto.EnumName(FieldOptions_CType_name, int32(x))
-}
-
-func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
- if err != nil {
- return err
- }
- *x = FieldOptions_CType(value)
- return nil
-}
-
-func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{12, 0}
-}
-
-type FieldOptions_JSType int32
-
-const (
- // Use the default type.
- FieldOptions_JS_NORMAL FieldOptions_JSType = 0
- // Use JavaScript strings.
- FieldOptions_JS_STRING FieldOptions_JSType = 1
- // Use JavaScript numbers.
- FieldOptions_JS_NUMBER FieldOptions_JSType = 2
-)
-
-var FieldOptions_JSType_name = map[int32]string{
- 0: "JS_NORMAL",
- 1: "JS_STRING",
- 2: "JS_NUMBER",
-}
-
-var FieldOptions_JSType_value = map[string]int32{
- "JS_NORMAL": 0,
- "JS_STRING": 1,
- "JS_NUMBER": 2,
-}
-
-func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
- p := new(FieldOptions_JSType)
- *p = x
- return p
-}
-
-func (x FieldOptions_JSType) String() string {
- return proto.EnumName(FieldOptions_JSType_name, int32(x))
-}
-
-func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
- if err != nil {
- return err
- }
- *x = FieldOptions_JSType(value)
- return nil
-}
-
-func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{12, 1}
-}
-
-// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
-// or neither? HTTP based RPC implementation may choose GET verb for safe
-// methods, and PUT verb for idempotent methods instead of the default POST.
-type MethodOptions_IdempotencyLevel int32
-
-const (
- MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
- MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1
- MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2
-)
-
-var MethodOptions_IdempotencyLevel_name = map[int32]string{
- 0: "IDEMPOTENCY_UNKNOWN",
- 1: "NO_SIDE_EFFECTS",
- 2: "IDEMPOTENT",
-}
-
-var MethodOptions_IdempotencyLevel_value = map[string]int32{
- "IDEMPOTENCY_UNKNOWN": 0,
- "NO_SIDE_EFFECTS": 1,
- "IDEMPOTENT": 2,
-}
-
-func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
- p := new(MethodOptions_IdempotencyLevel)
- *p = x
- return p
-}
-
-func (x MethodOptions_IdempotencyLevel) String() string {
- return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
-}
-
-func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
- if err != nil {
- return err
- }
- *x = MethodOptions_IdempotencyLevel(value)
- return nil
-}
-
-func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{17, 0}
-}
-
-// The protocol compiler can output a FileDescriptorSet containing the .proto
-// files it parses.
-type FileDescriptorSet struct {
- File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} }
-func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorSet) ProtoMessage() {}
-func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{0}
-}
-
-func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
-}
-func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
-}
-func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileDescriptorSet.Merge(m, src)
-}
-func (m *FileDescriptorSet) XXX_Size() int {
- return xxx_messageInfo_FileDescriptorSet.Size(m)
-}
-func (m *FileDescriptorSet) XXX_DiscardUnknown() {
- xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
-
-func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
- if m != nil {
- return m.File
- }
- return nil
-}
-
-// Describes a complete .proto file.
-type FileDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
- // Names of files imported by this file.
- Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
- // Indexes of the public imported files in the dependency list above.
- PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
- // Indexes of the weak imported files in the dependency list.
- // For Google-internal migration only. Do not use.
- WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
- // All top-level definitions in this file.
- MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
- EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
- Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
- Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
- Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
- // This field contains optional information about the original source code.
- // You may safely remove this entire field without harming runtime
- // functionality of the descriptors -- the information is needed only by
- // development tools.
- SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
- // The syntax of the proto file.
- // The supported values are "proto2" and "proto3".
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
-func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorProto) ProtoMessage() {}
-func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{1}
-}
-
-func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
-}
-func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileDescriptorProto.Merge(m, src)
-}
-func (m *FileDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_FileDescriptorProto.Size(m)
-}
-func (m *FileDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
-
-func (m *FileDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *FileDescriptorProto) GetPackage() string {
- if m != nil && m.Package != nil {
- return *m.Package
- }
- return ""
-}
-
-func (m *FileDescriptorProto) GetDependency() []string {
- if m != nil {
- return m.Dependency
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetPublicDependency() []int32 {
- if m != nil {
- return m.PublicDependency
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetWeakDependency() []int32 {
- if m != nil {
- return m.WeakDependency
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
- if m != nil {
- return m.MessageType
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
- if m != nil {
- return m.EnumType
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
- if m != nil {
- return m.Service
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
- if m != nil {
- return m.Extension
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetOptions() *FileOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
- if m != nil {
- return m.SourceCodeInfo
- }
- return nil
-}
-
-func (m *FileDescriptorProto) GetSyntax() string {
- if m != nil && m.Syntax != nil {
- return *m.Syntax
- }
- return ""
-}
-
-// Describes a message type.
-type DescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
- Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
- NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
- EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
- ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
- OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
- Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
- ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
- // Reserved field names, which may not be used by fields in the same message.
- // A given name may only be reserved once.
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
-func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto) ProtoMessage() {}
-func (*DescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{2}
-}
-
-func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
-}
-func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto.Merge(m, src)
-}
-func (m *DescriptorProto) XXX_Size() int {
- return xxx_messageInfo_DescriptorProto.Size(m)
-}
-func (m *DescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
-
-func (m *DescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
- if m != nil {
- return m.Field
- }
- return nil
-}
-
-func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
- if m != nil {
- return m.Extension
- }
- return nil
-}
-
-func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
- if m != nil {
- return m.NestedType
- }
- return nil
-}
-
-func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
- if m != nil {
- return m.EnumType
- }
- return nil
-}
-
-func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
- if m != nil {
- return m.ExtensionRange
- }
- return nil
-}
-
-func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
- if m != nil {
- return m.OneofDecl
- }
- return nil
-}
-
-func (m *DescriptorProto) GetOptions() *MessageOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
- if m != nil {
- return m.ReservedRange
- }
- return nil
-}
-
-func (m *DescriptorProto) GetReservedName() []string {
- if m != nil {
- return m.ReservedName
- }
- return nil
-}
-
-type DescriptorProto_ExtensionRange struct {
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
- Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
-func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
-func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{2, 0}
-}
-
-func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
- return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
- xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
-
-func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Range of reserved tag numbers. Reserved tag numbers may not be used by
-// fields or extension ranges in the same message. Reserved ranges may
-// not overlap.
-type DescriptorProto_ReservedRange struct {
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} }
-func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto_ReservedRange) ProtoMessage() {}
-func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{2, 1}
-}
-
-func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Size() int {
- return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
-}
-func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
- xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
-
-func (m *DescriptorProto_ReservedRange) GetStart() int32 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-type ExtensionRangeOptions struct {
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} }
-func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
-func (*ExtensionRangeOptions) ProtoMessage() {}
-func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{3}
-}
-
-var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_ExtensionRangeOptions
-}
-
-func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
-}
-func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
-}
-func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
-}
-func (m *ExtensionRangeOptions) XXX_Size() int {
- return xxx_messageInfo_ExtensionRangeOptions.Size(m)
-}
-func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
-
-func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-// Describes a field within a message.
-type FieldDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
- Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
- // If type_name is set, this need not be set. If both this and type_name
- // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
- Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
- // For message and enum types, this is the name of the type. If the name
- // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
- // rules are used to find the type (i.e. first the nested types within this
- // message are searched, then within the parent, on up to the root
- // namespace).
- TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
- // For extensions, this is the name of the type being extended. It is
- // resolved in the same manner as type_name.
- Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
- // For numeric types, contains the original text representation of the value.
- // For booleans, "true" or "false".
- // For strings, contains the default text contents (not escaped in any way).
- // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
- // TODO(kenton): Base-64 encode?
- DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
- // If set, gives the index of a oneof in the containing type's oneof_decl
- // list. This field is a member of that oneof.
- OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
- // JSON name of this field. The value is set by protocol compiler. If the
- // user has set a "json_name" option on this field, that option's value
- // will be used. Otherwise, it's deduced from the field's name by converting
- // it to camelCase.
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
- Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
-func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*FieldDescriptorProto) ProtoMessage() {}
-func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{4}
-}
-
-func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
-}
-func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
-}
-func (m *FieldDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_FieldDescriptorProto.Size(m)
-}
-func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
-
-func (m *FieldDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetNumber() int32 {
- if m != nil && m.Number != nil {
- return *m.Number
- }
- return 0
-}
-
-func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return FieldDescriptorProto_LABEL_OPTIONAL
-}
-
-func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return FieldDescriptorProto_TYPE_DOUBLE
-}
-
-func (m *FieldDescriptorProto) GetTypeName() string {
- if m != nil && m.TypeName != nil {
- return *m.TypeName
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetExtendee() string {
- if m != nil && m.Extendee != nil {
- return *m.Extendee
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetDefaultValue() string {
- if m != nil && m.DefaultValue != nil {
- return *m.DefaultValue
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetOneofIndex() int32 {
- if m != nil && m.OneofIndex != nil {
- return *m.OneofIndex
- }
- return 0
-}
-
-func (m *FieldDescriptorProto) GetJsonName() string {
- if m != nil && m.JsonName != nil {
- return *m.JsonName
- }
- return ""
-}
-
-func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Describes a oneof.
-type OneofDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
-func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*OneofDescriptorProto) ProtoMessage() {}
-func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{5}
-}
-
-func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
-}
-func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
-}
-func (m *OneofDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_OneofDescriptorProto.Size(m)
-}
-func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
-
-func (m *OneofDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Describes an enum type.
-type EnumDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
- Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
- // Range of reserved numeric values. Reserved numeric values may not be used
- // by enum values in the same enum declaration. Reserved ranges may not
- // overlap.
- ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
- // Reserved enum value names, which may not be reused. A given name may only
- // be reserved once.
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
-func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto) ProtoMessage() {}
-func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{6}
-}
-
-func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
-}
-func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
-}
-func (m *EnumDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_EnumDescriptorProto.Size(m)
-}
-func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
-
-func (m *EnumDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
- if m != nil {
- return m.ReservedRange
- }
- return nil
-}
-
-func (m *EnumDescriptorProto) GetReservedName() []string {
- if m != nil {
- return m.ReservedName
- }
- return nil
-}
-
-// Range of reserved numeric values. Reserved values may not be used by
-// entries in the same enum. Reserved ranges may not overlap.
-//
-// Note that this is distinct from DescriptorProto.ReservedRange in that it
-// is inclusive such that it can appropriately represent the entire int32
-// domain.
-type EnumDescriptorProto_EnumReservedRange struct {
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} }
-func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
-func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{6, 0}
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
- return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-// Describes a value within an enum.
-type EnumValueDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
- Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
-func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumValueDescriptorProto) ProtoMessage() {}
-func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{7}
-}
-
-func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
-}
-func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
-}
-func (m *EnumValueDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
-}
-func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
-
-func (m *EnumValueDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *EnumValueDescriptorProto) GetNumber() int32 {
- if m != nil && m.Number != nil {
- return *m.Number
- }
- return 0
-}
-
-func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Describes a service.
-type ServiceDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
- Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
-func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*ServiceDescriptorProto) ProtoMessage() {}
-func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{8}
-}
-
-func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
-}
-func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
-}
-func (m *ServiceDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_ServiceDescriptorProto.Size(m)
-}
-func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
-
-func (m *ServiceDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
- if m != nil {
- return m.Method
- }
- return nil
-}
-
-func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-// Describes a method of a service.
-type MethodDescriptorProto struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- // Input and output type names. These are resolved in the same way as
- // FieldDescriptorProto.type_name, but must refer to a message type.
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
- OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
- Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
- // Identifies if client streams multiple client messages
- ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
- // Identifies if server streams multiple server messages
- ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
-func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*MethodDescriptorProto) ProtoMessage() {}
-func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{9}
-}
-
-func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
-}
-func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
-}
-func (m *MethodDescriptorProto) XXX_Size() int {
- return xxx_messageInfo_MethodDescriptorProto.Size(m)
-}
-func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
- xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
-
-const Default_MethodDescriptorProto_ClientStreaming bool = false
-const Default_MethodDescriptorProto_ServerStreaming bool = false
-
-func (m *MethodDescriptorProto) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *MethodDescriptorProto) GetInputType() string {
- if m != nil && m.InputType != nil {
- return *m.InputType
- }
- return ""
-}
-
-func (m *MethodDescriptorProto) GetOutputType() string {
- if m != nil && m.OutputType != nil {
- return *m.OutputType
- }
- return ""
-}
-
-func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *MethodDescriptorProto) GetClientStreaming() bool {
- if m != nil && m.ClientStreaming != nil {
- return *m.ClientStreaming
- }
- return Default_MethodDescriptorProto_ClientStreaming
-}
-
-func (m *MethodDescriptorProto) GetServerStreaming() bool {
- if m != nil && m.ServerStreaming != nil {
- return *m.ServerStreaming
- }
- return Default_MethodDescriptorProto_ServerStreaming
-}
-
-type FileOptions struct {
- // Sets the Java package where classes generated from this .proto will be
- // placed. By default, the proto package is used, but this is often
- // inappropriate because proto packages do not normally start with backwards
- // domain names.
- JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
- // If set, all the classes from the .proto file are wrapped in a single
- // outer class with the given name. This applies to both Proto1
- // (equivalent to the old "--one_java_file" option) and Proto2 (where
- // a .proto always translates to a single class, but you may want to
- // explicitly choose the class name).
- JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
- // If set true, then the Java code generator will generate a separate .java
- // file for each top-level message, enum, and service defined in the .proto
- // file. Thus, these types will *not* be nested inside the outer class
- // named by java_outer_classname. However, the outer class will still be
- // generated to contain the file's getDescriptor() method as well as any
- // top-level extensions defined in the file.
- JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
- // This option does nothing.
- JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
- // If set true, then the Java2 code generator will generate code that
- // throws an exception whenever an attempt is made to assign a non-UTF-8
- // byte sequence to a string field.
- // Message reflection will do the same.
- // However, an extension field still accepts non-UTF-8 byte sequences.
- // This option has no effect on when used with the lite runtime.
- JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
- OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
- // Sets the Go package where structs generated from this .proto will be
- // placed. If omitted, the Go package will be derived from the following:
- // - The basename of the package import path, if provided.
- // - Otherwise, the package statement in the .proto file, if present.
- // - Otherwise, the basename of the .proto file, without extension.
- GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
- // Should generic services be generated in each language? "Generic" services
- // are not specific to any particular RPC system. They are generated by the
- // main code generators in each language (without additional plugins).
- // Generic services were the only kind of service generation supported by
- // early versions of google.protobuf.
- //
- // Generic services are now considered deprecated in favor of using plugins
- // that generate code specific to your particular RPC system. Therefore,
- // these default to false. Old code which depends on generic services should
- // explicitly set them to true.
- CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
- JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
- PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
- PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
- // Is this file deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for everything in the file, or it will be completely ignored; in the very
- // least, this is a formalization for deprecating files.
- Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // Enables the use of arenas for the proto messages in this file. This applies
- // only to generated classes for C++.
- CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
- // Sets the objective c class prefix which is prepended to all objective c
- // generated classes from this .proto. There is no default.
- ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
- // Namespace for generated classes; defaults to the package.
- CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
- // By default Swift generators will take the proto package and CamelCase it
- // replacing '.' with underscore and use that to prefix the types/symbols
- // defined. When this options is provided, they will use this value instead
- // to prefix the types/symbols defined.
- SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
- // Sets the php class prefix which is prepended to all php generated classes
- // from this .proto. Default is empty.
- PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
- // Use this option to change the namespace of php generated classes. Default
- // is empty. When this option is empty, the package name will be used for
- // determining the namespace.
- PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
- // Use this option to change the namespace of php generated metadata classes.
- // Default is empty. When this option is empty, the proto file name will be used
- // for determining the namespace.
- PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
- // Use this option to change the package of ruby generated classes. Default
- // is empty. When this option is not set, the package name will be used for
- // determining the ruby package.
- RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
- // The parser stores options it doesn't recognize here.
- // See the documentation for the "Options" section above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileOptions) Reset() { *m = FileOptions{} }
-func (m *FileOptions) String() string { return proto.CompactTextString(m) }
-func (*FileOptions) ProtoMessage() {}
-func (*FileOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{10}
-}
-
-var extRange_FileOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_FileOptions
-}
-
-func (m *FileOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileOptions.Unmarshal(m, b)
-}
-func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
-}
-func (m *FileOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileOptions.Merge(m, src)
-}
-func (m *FileOptions) XXX_Size() int {
- return xxx_messageInfo_FileOptions.Size(m)
-}
-func (m *FileOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_FileOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileOptions proto.InternalMessageInfo
-
-const Default_FileOptions_JavaMultipleFiles bool = false
-const Default_FileOptions_JavaStringCheckUtf8 bool = false
-const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
-const Default_FileOptions_CcGenericServices bool = false
-const Default_FileOptions_JavaGenericServices bool = false
-const Default_FileOptions_PyGenericServices bool = false
-const Default_FileOptions_PhpGenericServices bool = false
-const Default_FileOptions_Deprecated bool = false
-const Default_FileOptions_CcEnableArenas bool = false
-
-func (m *FileOptions) GetJavaPackage() string {
- if m != nil && m.JavaPackage != nil {
- return *m.JavaPackage
- }
- return ""
-}
-
-func (m *FileOptions) GetJavaOuterClassname() string {
- if m != nil && m.JavaOuterClassname != nil {
- return *m.JavaOuterClassname
- }
- return ""
-}
-
-func (m *FileOptions) GetJavaMultipleFiles() bool {
- if m != nil && m.JavaMultipleFiles != nil {
- return *m.JavaMultipleFiles
- }
- return Default_FileOptions_JavaMultipleFiles
-}
-
-// Deprecated: Do not use.
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
- if m != nil && m.JavaGenerateEqualsAndHash != nil {
- return *m.JavaGenerateEqualsAndHash
- }
- return false
-}
-
-func (m *FileOptions) GetJavaStringCheckUtf8() bool {
- if m != nil && m.JavaStringCheckUtf8 != nil {
- return *m.JavaStringCheckUtf8
- }
- return Default_FileOptions_JavaStringCheckUtf8
-}
-
-func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
- if m != nil && m.OptimizeFor != nil {
- return *m.OptimizeFor
- }
- return Default_FileOptions_OptimizeFor
-}
-
-func (m *FileOptions) GetGoPackage() string {
- if m != nil && m.GoPackage != nil {
- return *m.GoPackage
- }
- return ""
-}
-
-func (m *FileOptions) GetCcGenericServices() bool {
- if m != nil && m.CcGenericServices != nil {
- return *m.CcGenericServices
- }
- return Default_FileOptions_CcGenericServices
-}
-
-func (m *FileOptions) GetJavaGenericServices() bool {
- if m != nil && m.JavaGenericServices != nil {
- return *m.JavaGenericServices
- }
- return Default_FileOptions_JavaGenericServices
-}
-
-func (m *FileOptions) GetPyGenericServices() bool {
- if m != nil && m.PyGenericServices != nil {
- return *m.PyGenericServices
- }
- return Default_FileOptions_PyGenericServices
-}
-
-func (m *FileOptions) GetPhpGenericServices() bool {
- if m != nil && m.PhpGenericServices != nil {
- return *m.PhpGenericServices
- }
- return Default_FileOptions_PhpGenericServices
-}
-
-func (m *FileOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_FileOptions_Deprecated
-}
-
-func (m *FileOptions) GetCcEnableArenas() bool {
- if m != nil && m.CcEnableArenas != nil {
- return *m.CcEnableArenas
- }
- return Default_FileOptions_CcEnableArenas
-}
-
-func (m *FileOptions) GetObjcClassPrefix() string {
- if m != nil && m.ObjcClassPrefix != nil {
- return *m.ObjcClassPrefix
- }
- return ""
-}
-
-func (m *FileOptions) GetCsharpNamespace() string {
- if m != nil && m.CsharpNamespace != nil {
- return *m.CsharpNamespace
- }
- return ""
-}
-
-func (m *FileOptions) GetSwiftPrefix() string {
- if m != nil && m.SwiftPrefix != nil {
- return *m.SwiftPrefix
- }
- return ""
-}
-
-func (m *FileOptions) GetPhpClassPrefix() string {
- if m != nil && m.PhpClassPrefix != nil {
- return *m.PhpClassPrefix
- }
- return ""
-}
-
-func (m *FileOptions) GetPhpNamespace() string {
- if m != nil && m.PhpNamespace != nil {
- return *m.PhpNamespace
- }
- return ""
-}
-
-func (m *FileOptions) GetPhpMetadataNamespace() string {
- if m != nil && m.PhpMetadataNamespace != nil {
- return *m.PhpMetadataNamespace
- }
- return ""
-}
-
-func (m *FileOptions) GetRubyPackage() string {
- if m != nil && m.RubyPackage != nil {
- return *m.RubyPackage
- }
- return ""
-}
-
-func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type MessageOptions struct {
- // Set true to use the old proto1 MessageSet wire format for extensions.
- // This is provided for backwards-compatibility with the MessageSet wire
- // format. You should not use this for any other reason: It's less
- // efficient, has fewer features, and is more complicated.
- //
- // The message must be defined exactly as follows:
- // message Foo {
- // option message_set_wire_format = true;
- // extensions 4 to max;
- // }
- // Note that the message cannot have any defined fields; MessageSets only
- // have extensions.
- //
- // All extensions of your type must be singular messages; e.g. they cannot
- // be int32s, enums, or repeated messages.
- //
- // Because this is an option, the above two restrictions are not enforced by
- // the protocol compiler.
- MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
- // Disables the generation of the standard "descriptor()" accessor, which can
- // conflict with a field of the same name. This is meant to make migration
- // from proto1 easier; new code should avoid fields named "descriptor".
- NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
- // Is this message deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the message, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating messages.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // Whether the message is an automatically generated map entry type for the
- // maps field.
- //
- // For maps fields:
- // map map_field = 1;
- // The parsed descriptor looks like:
- // message MapFieldEntry {
- // option map_entry = true;
- // optional KeyType key = 1;
- // optional ValueType value = 2;
- // }
- // repeated MapFieldEntry map_field = 1;
- //
- // Implementations may choose not to generate the map_entry=true message, but
- // use a native map in the target language to hold the keys and values.
- // The reflection APIs in such implementions still need to work as
- // if the field is a repeated message field.
- //
- // NOTE: Do not set the option in .proto files. Always use the maps syntax
- // instead. The option should only be implicitly set by the proto compiler
- // parser.
- MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MessageOptions) Reset() { *m = MessageOptions{} }
-func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
-func (*MessageOptions) ProtoMessage() {}
-func (*MessageOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{11}
-}
-
-var extRange_MessageOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_MessageOptions
-}
-
-func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
-}
-func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
-}
-func (m *MessageOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MessageOptions.Merge(m, src)
-}
-func (m *MessageOptions) XXX_Size() int {
- return xxx_messageInfo_MessageOptions.Size(m)
-}
-func (m *MessageOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_MessageOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
-
-const Default_MessageOptions_MessageSetWireFormat bool = false
-const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
-const Default_MessageOptions_Deprecated bool = false
-
-func (m *MessageOptions) GetMessageSetWireFormat() bool {
- if m != nil && m.MessageSetWireFormat != nil {
- return *m.MessageSetWireFormat
- }
- return Default_MessageOptions_MessageSetWireFormat
-}
-
-func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
- if m != nil && m.NoStandardDescriptorAccessor != nil {
- return *m.NoStandardDescriptorAccessor
- }
- return Default_MessageOptions_NoStandardDescriptorAccessor
-}
-
-func (m *MessageOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_MessageOptions_Deprecated
-}
-
-func (m *MessageOptions) GetMapEntry() bool {
- if m != nil && m.MapEntry != nil {
- return *m.MapEntry
- }
- return false
-}
-
-func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type FieldOptions struct {
- // The ctype option instructs the C++ code generator to use a different
- // representation of the field than it normally would. See the specific
- // options below. This option is not yet implemented in the open source
- // release -- sorry, we'll try to include it in a future version!
- Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
- // The packed option can be enabled for repeated primitive fields to enable
- // a more efficient representation on the wire. Rather than repeatedly
- // writing the tag and type for each element, the entire array is encoded as
- // a single length-delimited blob. In proto3, only explicit setting it to
- // false will avoid using packed encoding.
- Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
- // The jstype option determines the JavaScript type used for values of the
- // field. The option is permitted only for 64 bit integral and fixed types
- // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
- // is represented as JavaScript string, which avoids loss of precision that
- // can happen when a large value is converted to a floating point JavaScript.
- // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
- // use the JavaScript "number" type. The behavior of the default option
- // JS_NORMAL is implementation dependent.
- //
- // This option is an enum to permit additional types to be added, e.g.
- // goog.math.Integer.
- Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
- // Should this field be parsed lazily? Lazy applies only to message-type
- // fields. It means that when the outer message is initially parsed, the
- // inner message's contents will not be parsed but instead stored in encoded
- // form. The inner message will actually be parsed when it is first accessed.
- //
- // This is only a hint. Implementations are free to choose whether to use
- // eager or lazy parsing regardless of the value of this option. However,
- // setting this option true suggests that the protocol author believes that
- // using lazy parsing on this field is worth the additional bookkeeping
- // overhead typically needed to implement it.
- //
- // This option does not affect the public interface of any generated code;
- // all method signatures remain the same. Furthermore, thread-safety of the
- // interface is not affected by this option; const methods remain safe to
- // call from multiple threads concurrently, while non-const methods continue
- // to require exclusive access.
- //
- //
- // Note that implementations may choose not to check required fields within
- // a lazy sub-message. That is, calling IsInitialized() on the outer message
- // may return true even if the inner message has missing required fields.
- // This is necessary because otherwise the inner message would have to be
- // parsed in order to perform the check, defeating the purpose of lazy
- // parsing. An implementation which chooses not to check required fields
- // must be consistent about it. That is, for any particular sub-message, the
- // implementation must either *always* check its required fields, or *never*
- // check its required fields, regardless of whether or not the message has
- // been parsed.
- Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
- // Is this field deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for accessors, or it will be completely ignored; in the very least, this
- // is a formalization for deprecating fields.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // For Google-internal migration only. Do not use.
- Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FieldOptions) Reset() { *m = FieldOptions{} }
-func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
-func (*FieldOptions) ProtoMessage() {}
-func (*FieldOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{12}
-}
-
-var extRange_FieldOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_FieldOptions
-}
-
-func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
-}
-func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
-}
-func (m *FieldOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldOptions.Merge(m, src)
-}
-func (m *FieldOptions) XXX_Size() int {
- return xxx_messageInfo_FieldOptions.Size(m)
-}
-func (m *FieldOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
-
-const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
-const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
-const Default_FieldOptions_Lazy bool = false
-const Default_FieldOptions_Deprecated bool = false
-const Default_FieldOptions_Weak bool = false
-
-func (m *FieldOptions) GetCtype() FieldOptions_CType {
- if m != nil && m.Ctype != nil {
- return *m.Ctype
- }
- return Default_FieldOptions_Ctype
-}
-
-func (m *FieldOptions) GetPacked() bool {
- if m != nil && m.Packed != nil {
- return *m.Packed
- }
- return false
-}
-
-func (m *FieldOptions) GetJstype() FieldOptions_JSType {
- if m != nil && m.Jstype != nil {
- return *m.Jstype
- }
- return Default_FieldOptions_Jstype
-}
-
-func (m *FieldOptions) GetLazy() bool {
- if m != nil && m.Lazy != nil {
- return *m.Lazy
- }
- return Default_FieldOptions_Lazy
-}
-
-func (m *FieldOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_FieldOptions_Deprecated
-}
-
-func (m *FieldOptions) GetWeak() bool {
- if m != nil && m.Weak != nil {
- return *m.Weak
- }
- return Default_FieldOptions_Weak
-}
-
-func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type OneofOptions struct {
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *OneofOptions) Reset() { *m = OneofOptions{} }
-func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
-func (*OneofOptions) ProtoMessage() {}
-func (*OneofOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{13}
-}
-
-var extRange_OneofOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_OneofOptions
-}
-
-func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
-}
-func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
-}
-func (m *OneofOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OneofOptions.Merge(m, src)
-}
-func (m *OneofOptions) XXX_Size() int {
- return xxx_messageInfo_OneofOptions.Size(m)
-}
-func (m *OneofOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_OneofOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
-
-func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type EnumOptions struct {
- // Set this option to true to allow mapping different tag names to the same
- // value.
- AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
- // Is this enum deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the enum, or it will be completely ignored; in the very least, this
- // is a formalization for deprecating enums.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumOptions) Reset() { *m = EnumOptions{} }
-func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumOptions) ProtoMessage() {}
-func (*EnumOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{14}
-}
-
-var extRange_EnumOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_EnumOptions
-}
-
-func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
-}
-func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
-}
-func (m *EnumOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumOptions.Merge(m, src)
-}
-func (m *EnumOptions) XXX_Size() int {
- return xxx_messageInfo_EnumOptions.Size(m)
-}
-func (m *EnumOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
-
-const Default_EnumOptions_Deprecated bool = false
-
-func (m *EnumOptions) GetAllowAlias() bool {
- if m != nil && m.AllowAlias != nil {
- return *m.AllowAlias
- }
- return false
-}
-
-func (m *EnumOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_EnumOptions_Deprecated
-}
-
-func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type EnumValueOptions struct {
- // Is this enum value deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the enum value, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating enum values.
- Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
-func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumValueOptions) ProtoMessage() {}
-func (*EnumValueOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{15}
-}
-
-var extRange_EnumValueOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_EnumValueOptions
-}
-
-func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
-}
-func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
-}
-func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumValueOptions.Merge(m, src)
-}
-func (m *EnumValueOptions) XXX_Size() int {
- return xxx_messageInfo_EnumValueOptions.Size(m)
-}
-func (m *EnumValueOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
-
-const Default_EnumValueOptions_Deprecated bool = false
-
-func (m *EnumValueOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_EnumValueOptions_Deprecated
-}
-
-func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type ServiceOptions struct {
- // Is this service deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the service, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating services.
- Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
-func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
-func (*ServiceOptions) ProtoMessage() {}
-func (*ServiceOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{16}
-}
-
-var extRange_ServiceOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_ServiceOptions
-}
-
-func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
-}
-func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
-}
-func (m *ServiceOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceOptions.Merge(m, src)
-}
-func (m *ServiceOptions) XXX_Size() int {
- return xxx_messageInfo_ServiceOptions.Size(m)
-}
-func (m *ServiceOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
-
-const Default_ServiceOptions_Deprecated bool = false
-
-func (m *ServiceOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_ServiceOptions_Deprecated
-}
-
-func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-type MethodOptions struct {
- // Is this method deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the method, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating methods.
- Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MethodOptions) Reset() { *m = MethodOptions{} }
-func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
-func (*MethodOptions) ProtoMessage() {}
-func (*MethodOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{17}
-}
-
-var extRange_MethodOptions = []proto.ExtensionRange{
- {Start: 1000, End: 536870911},
-}
-
-func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_MethodOptions
-}
-
-func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
-}
-func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
-}
-func (m *MethodOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MethodOptions.Merge(m, src)
-}
-func (m *MethodOptions) XXX_Size() int {
- return xxx_messageInfo_MethodOptions.Size(m)
-}
-func (m *MethodOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_MethodOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
-
-const Default_MethodOptions_Deprecated bool = false
-const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
-
-func (m *MethodOptions) GetDeprecated() bool {
- if m != nil && m.Deprecated != nil {
- return *m.Deprecated
- }
- return Default_MethodOptions_Deprecated
-}
-
-func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
- if m != nil && m.IdempotencyLevel != nil {
- return *m.IdempotencyLevel
- }
- return Default_MethodOptions_IdempotencyLevel
-}
-
-func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
- if m != nil {
- return m.UninterpretedOption
- }
- return nil
-}
-
-// A message representing a option the parser does not recognize. This only
-// appears in options protos created by the compiler::Parser class.
-// DescriptorPool resolves these when building Descriptor objects. Therefore,
-// options protos in descriptor objects (e.g. returned by Descriptor::options(),
-// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
-// in them.
-type UninterpretedOption struct {
- Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
- // The value of the uninterpreted option, in whatever type the tokenizer
- // identified it as during parsing. Exactly one of these should be set.
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
- PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
- NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
- DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
- StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
- AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
-func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption) ProtoMessage() {}
-func (*UninterpretedOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{18}
-}
-
-func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
-}
-func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
-}
-func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UninterpretedOption.Merge(m, src)
-}
-func (m *UninterpretedOption) XXX_Size() int {
- return xxx_messageInfo_UninterpretedOption.Size(m)
-}
-func (m *UninterpretedOption) XXX_DiscardUnknown() {
- xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
-
-func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
- if m != nil {
- return m.Name
- }
- return nil
-}
-
-func (m *UninterpretedOption) GetIdentifierValue() string {
- if m != nil && m.IdentifierValue != nil {
- return *m.IdentifierValue
- }
- return ""
-}
-
-func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
- if m != nil && m.PositiveIntValue != nil {
- return *m.PositiveIntValue
- }
- return 0
-}
-
-func (m *UninterpretedOption) GetNegativeIntValue() int64 {
- if m != nil && m.NegativeIntValue != nil {
- return *m.NegativeIntValue
- }
- return 0
-}
-
-func (m *UninterpretedOption) GetDoubleValue() float64 {
- if m != nil && m.DoubleValue != nil {
- return *m.DoubleValue
- }
- return 0
-}
-
-func (m *UninterpretedOption) GetStringValue() []byte {
- if m != nil {
- return m.StringValue
- }
- return nil
-}
-
-func (m *UninterpretedOption) GetAggregateValue() string {
- if m != nil && m.AggregateValue != nil {
- return *m.AggregateValue
- }
- return ""
-}
-
-// The name of the uninterpreted option. Each string represents a segment in
-// a dot-separated name. is_extension is true iff a segment represents an
-// extension (denoted with parentheses in options specs in .proto files).
-// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
-// "foo.(bar.baz).qux".
-type UninterpretedOption_NamePart struct {
- NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
- IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
-func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption_NamePart) ProtoMessage() {}
-func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{18, 0}
-}
-
-func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
-}
-func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
-}
-func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
-}
-func (m *UninterpretedOption_NamePart) XXX_Size() int {
- return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
-}
-func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
- xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
-
-func (m *UninterpretedOption_NamePart) GetNamePart() string {
- if m != nil && m.NamePart != nil {
- return *m.NamePart
- }
- return ""
-}
-
-func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
- if m != nil && m.IsExtension != nil {
- return *m.IsExtension
- }
- return false
-}
-
-// Encapsulates information about the original source file from which a
-// FileDescriptorProto was generated.
-type SourceCodeInfo struct {
- // A Location identifies a piece of source code in a .proto file which
- // corresponds to a particular definition. This information is intended
- // to be useful to IDEs, code indexers, documentation generators, and similar
- // tools.
- //
- // For example, say we have a file like:
- // message Foo {
- // optional string foo = 1;
- // }
- // Let's look at just the field definition:
- // optional string foo = 1;
- // ^ ^^ ^^ ^ ^^^
- // a bc de f ghi
- // We have the following locations:
- // span path represents
- // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
- // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
- // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
- // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
- //
- // Notes:
- // - A location may refer to a repeated field itself (i.e. not to any
- // particular index within it). This is used whenever a set of elements are
- // logically enclosed in a single code segment. For example, an entire
- // extend block (possibly containing multiple extension definitions) will
- // have an outer location whose path refers to the "extensions" repeated
- // field without an index.
- // - Multiple locations may have the same path. This happens when a single
- // logical declaration is spread out across multiple places. The most
- // obvious example is the "extend" block again -- there may be multiple
- // extend blocks in the same scope, each of which will have the same path.
- // - A location's span is not always a subset of its parent's span. For
- // example, the "extendee" of an extension declaration appears at the
- // beginning of the "extend" block and is shared by all extensions within
- // the block.
- // - Just because a location's span is a subset of some other location's span
- // does not mean that it is a descendent. For example, a "group" defines
- // both a type and a field in a single declaration. Thus, the locations
- // corresponding to the type and field and their components will overlap.
- // - Code which tries to interpret locations should probably be designed to
- // ignore those that it doesn't understand, as more types of locations could
- // be recorded in the future.
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
-func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo) ProtoMessage() {}
-func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{19}
-}
-
-func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
-}
-func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
-}
-func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SourceCodeInfo.Merge(m, src)
-}
-func (m *SourceCodeInfo) XXX_Size() int {
- return xxx_messageInfo_SourceCodeInfo.Size(m)
-}
-func (m *SourceCodeInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
-
-func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
- if m != nil {
- return m.Location
- }
- return nil
-}
-
-type SourceCodeInfo_Location struct {
- // Identifies which part of the FileDescriptorProto was defined at this
- // location.
- //
- // Each element is a field number or an index. They form a path from
- // the root FileDescriptorProto to the place where the definition. For
- // example, this path:
- // [ 4, 3, 2, 7, 1 ]
- // refers to:
- // file.message_type(3) // 4, 3
- // .field(7) // 2, 7
- // .name() // 1
- // This is because FileDescriptorProto.message_type has field number 4:
- // repeated DescriptorProto message_type = 4;
- // and DescriptorProto.field has field number 2:
- // repeated FieldDescriptorProto field = 2;
- // and FieldDescriptorProto.name has field number 1:
- // optional string name = 1;
- //
- // Thus, the above path gives the location of a field name. If we removed
- // the last element:
- // [ 4, 3, 2, 7 ]
- // this path refers to the whole field declaration (from the beginning
- // of the label to the terminating semicolon).
- Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
- // Always has exactly three or four elements: start line, start column,
- // end line (optional, otherwise assumed same as start line), end column.
- // These are packed into a single field for efficiency. Note that line
- // and column numbers are zero-based -- typically you will want to add
- // 1 to each before displaying to a user.
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
- // If this SourceCodeInfo represents a complete declaration, these are any
- // comments appearing before and after the declaration which appear to be
- // attached to the declaration.
- //
- // A series of line comments appearing on consecutive lines, with no other
- // tokens appearing on those lines, will be treated as a single comment.
- //
- // leading_detached_comments will keep paragraphs of comments that appear
- // before (but not connected to) the current element. Each paragraph,
- // separated by empty lines, will be one comment element in the repeated
- // field.
- //
- // Only the comment content is provided; comment markers (e.g. //) are
- // stripped out. For block comments, leading whitespace and an asterisk
- // will be stripped from the beginning of each line other than the first.
- // Newlines are included in the output.
- //
- // Examples:
- //
- // optional int32 foo = 1; // Comment attached to foo.
- // // Comment attached to bar.
- // optional int32 bar = 2;
- //
- // optional string baz = 3;
- // // Comment attached to baz.
- // // Another line attached to baz.
- //
- // // Comment attached to qux.
- // //
- // // Another line attached to qux.
- // optional double qux = 4;
- //
- // // Detached comment for corge. This is not leading or trailing comments
- // // to qux or corge because there are blank lines separating it from
- // // both.
- //
- // // Detached comment for corge paragraph 2.
- //
- // optional string corge = 5;
- // /* Block comment attached
- // * to corge. Leading asterisks
- // * will be removed. */
- // /* Block comment attached to
- // * grault. */
- // optional int32 grault = 6;
- //
- // // ignored detached comments.
- LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
- TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
- LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
-func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo_Location) ProtoMessage() {}
-func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{19, 0}
-}
-
-func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
-}
-func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
-}
-func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
-}
-func (m *SourceCodeInfo_Location) XXX_Size() int {
- return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
-}
-func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
- xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
-
-func (m *SourceCodeInfo_Location) GetPath() []int32 {
- if m != nil {
- return m.Path
- }
- return nil
-}
-
-func (m *SourceCodeInfo_Location) GetSpan() []int32 {
- if m != nil {
- return m.Span
- }
- return nil
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingComments() string {
- if m != nil && m.LeadingComments != nil {
- return *m.LeadingComments
- }
- return ""
-}
-
-func (m *SourceCodeInfo_Location) GetTrailingComments() string {
- if m != nil && m.TrailingComments != nil {
- return *m.TrailingComments
- }
- return ""
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
- if m != nil {
- return m.LeadingDetachedComments
- }
- return nil
-}
-
-// Describes the relationship between generated code and its original source
-// file. A GeneratedCodeInfo message is associated with only one generated
-// source file, but may contain references to different source .proto files.
-type GeneratedCodeInfo struct {
- // An Annotation connects some span of text in generated code to an element
- // of its generating .proto file.
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
-func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo) ProtoMessage() {}
-func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{20}
-}
-
-func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
-}
-func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
-}
-func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
-}
-func (m *GeneratedCodeInfo) XXX_Size() int {
- return xxx_messageInfo_GeneratedCodeInfo.Size(m)
-}
-func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
-
-func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
- if m != nil {
- return m.Annotation
- }
- return nil
-}
-
-type GeneratedCodeInfo_Annotation struct {
- // Identifies the element in the original source .proto file. This field
- // is formatted the same as SourceCodeInfo.Location.path.
- Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
- // Identifies the filesystem path to the original source .proto.
- SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
- // Identifies the starting offset in bytes in the generated code
- // that relates to the identified object.
- Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
- // Identifies the ending offset in bytes in the generated code that
- // relates to the identified offset. The end offset should be one past
- // the last relevant byte (so the length of the text = end - begin).
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} }
-func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
-func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_e5baabe45344a177, []int{20, 0}
-}
-
-func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
- return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
- xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
-
-func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
- if m != nil {
- return m.Path
- }
- return nil
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
- if m != nil && m.SourceFile != nil {
- return *m.SourceFile
- }
- return ""
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
- if m != nil && m.Begin != nil {
- return *m.Begin
- }
- return 0
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
- proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
- proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
- proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
- proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
- proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
- proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
- proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
- proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
- proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
- proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
- proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
- proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
- proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
- proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
- proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
- proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
- proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
- proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
- proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
- proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
- proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
- proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
- proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
- proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
- proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
- proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
- proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
- proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
- proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
- proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
- proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
- proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
-}
-
-func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) }
-
-var fileDescriptor_e5baabe45344a177 = []byte{
- // 2589 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
- 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca,
- 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee,
- 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca,
- 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80,
- 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c,
- 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73,
- 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04,
- 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a,
- 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0,
- 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52,
- 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90,
- 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88,
- 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd,
- 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c,
- 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6,
- 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf,
- 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79,
- 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11,
- 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53,
- 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84,
- 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4,
- 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e,
- 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9,
- 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2,
- 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02,
- 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6,
- 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d,
- 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33,
- 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79,
- 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a,
- 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a,
- 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c,
- 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56,
- 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06,
- 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1,
- 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23,
- 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01,
- 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f,
- 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d,
- 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58,
- 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32,
- 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e,
- 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb,
- 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11,
- 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02,
- 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f,
- 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31,
- 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac,
- 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f,
- 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d,
- 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac,
- 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e,
- 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72,
- 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b,
- 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2,
- 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e,
- 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94,
- 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a,
- 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61,
- 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0,
- 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5,
- 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a,
- 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a,
- 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8,
- 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64,
- 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c,
- 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8,
- 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb,
- 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2,
- 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33,
- 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4,
- 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15,
- 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39,
- 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9,
- 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41,
- 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40,
- 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35,
- 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0,
- 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4,
- 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53,
- 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e,
- 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d,
- 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e,
- 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f,
- 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36,
- 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60,
- 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1,
- 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d,
- 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45,
- 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58,
- 0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3,
- 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c,
- 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87,
- 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49,
- 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26,
- 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39,
- 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c,
- 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77,
- 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83,
- 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87,
- 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20,
- 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6,
- 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e,
- 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c,
- 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0,
- 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8,
- 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0,
- 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3,
- 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1,
- 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53,
- 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42,
- 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b,
- 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15,
- 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae,
- 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2,
- 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35,
- 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0,
- 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82,
- 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a,
- 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c,
- 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5,
- 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29,
- 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca,
- 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd,
- 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18,
- 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb,
- 0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae,
- 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6,
- 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a,
- 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47,
- 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8,
- 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e,
- 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0,
- 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70,
- 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41,
- 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e,
- 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c,
- 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47,
- 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2,
- 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66,
- 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2,
- 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0,
- 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9,
- 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40,
- 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c,
- 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe,
- 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d,
- 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99,
- 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69,
- 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2,
- 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda,
- 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86,
- 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24,
- 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8,
- 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25,
- 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e,
- 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee,
- 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39,
- 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f,
- 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00,
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
deleted file mode 100644
index ed08fcbc5..000000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
+++ /dev/null
@@ -1,883 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Author: kenton@google.com (Kenton Varda)
-// Based on original Protocol Buffers design by
-// Sanjay Ghemawat, Jeff Dean, and others.
-//
-// The messages in this file describe the definitions found in .proto files.
-// A valid .proto file can be translated directly to a FileDescriptorProto
-// without any other information (e.g. without reading its imports).
-
-
-syntax = "proto2";
-
-package google.protobuf;
-option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DescriptorProtos";
-option csharp_namespace = "Google.Protobuf.Reflection";
-option objc_class_prefix = "GPB";
-option cc_enable_arenas = true;
-
-// descriptor.proto must be optimized for speed because reflection-based
-// algorithms don't work during bootstrapping.
-option optimize_for = SPEED;
-
-// The protocol compiler can output a FileDescriptorSet containing the .proto
-// files it parses.
-message FileDescriptorSet {
- repeated FileDescriptorProto file = 1;
-}
-
-// Describes a complete .proto file.
-message FileDescriptorProto {
- optional string name = 1; // file name, relative to root of source tree
- optional string package = 2; // e.g. "foo", "foo.bar", etc.
-
- // Names of files imported by this file.
- repeated string dependency = 3;
- // Indexes of the public imported files in the dependency list above.
- repeated int32 public_dependency = 10;
- // Indexes of the weak imported files in the dependency list.
- // For Google-internal migration only. Do not use.
- repeated int32 weak_dependency = 11;
-
- // All top-level definitions in this file.
- repeated DescriptorProto message_type = 4;
- repeated EnumDescriptorProto enum_type = 5;
- repeated ServiceDescriptorProto service = 6;
- repeated FieldDescriptorProto extension = 7;
-
- optional FileOptions options = 8;
-
- // This field contains optional information about the original source code.
- // You may safely remove this entire field without harming runtime
- // functionality of the descriptors -- the information is needed only by
- // development tools.
- optional SourceCodeInfo source_code_info = 9;
-
- // The syntax of the proto file.
- // The supported values are "proto2" and "proto3".
- optional string syntax = 12;
-}
-
-// Describes a message type.
-message DescriptorProto {
- optional string name = 1;
-
- repeated FieldDescriptorProto field = 2;
- repeated FieldDescriptorProto extension = 6;
-
- repeated DescriptorProto nested_type = 3;
- repeated EnumDescriptorProto enum_type = 4;
-
- message ExtensionRange {
- optional int32 start = 1;
- optional int32 end = 2;
-
- optional ExtensionRangeOptions options = 3;
- }
- repeated ExtensionRange extension_range = 5;
-
- repeated OneofDescriptorProto oneof_decl = 8;
-
- optional MessageOptions options = 7;
-
- // Range of reserved tag numbers. Reserved tag numbers may not be used by
- // fields or extension ranges in the same message. Reserved ranges may
- // not overlap.
- message ReservedRange {
- optional int32 start = 1; // Inclusive.
- optional int32 end = 2; // Exclusive.
- }
- repeated ReservedRange reserved_range = 9;
- // Reserved field names, which may not be used by fields in the same message.
- // A given name may only be reserved once.
- repeated string reserved_name = 10;
-}
-
-message ExtensionRangeOptions {
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-// Describes a field within a message.
-message FieldDescriptorProto {
- enum Type {
- // 0 is reserved for errors.
- // Order is weird for historical reasons.
- TYPE_DOUBLE = 1;
- TYPE_FLOAT = 2;
- // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
- // negative values are likely.
- TYPE_INT64 = 3;
- TYPE_UINT64 = 4;
- // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
- // negative values are likely.
- TYPE_INT32 = 5;
- TYPE_FIXED64 = 6;
- TYPE_FIXED32 = 7;
- TYPE_BOOL = 8;
- TYPE_STRING = 9;
- // Tag-delimited aggregate.
- // Group type is deprecated and not supported in proto3. However, Proto3
- // implementations should still be able to parse the group wire format and
- // treat group fields as unknown fields.
- TYPE_GROUP = 10;
- TYPE_MESSAGE = 11; // Length-delimited aggregate.
-
- // New in version 2.
- TYPE_BYTES = 12;
- TYPE_UINT32 = 13;
- TYPE_ENUM = 14;
- TYPE_SFIXED32 = 15;
- TYPE_SFIXED64 = 16;
- TYPE_SINT32 = 17; // Uses ZigZag encoding.
- TYPE_SINT64 = 18; // Uses ZigZag encoding.
- };
-
- enum Label {
- // 0 is reserved for errors
- LABEL_OPTIONAL = 1;
- LABEL_REQUIRED = 2;
- LABEL_REPEATED = 3;
- };
-
- optional string name = 1;
- optional int32 number = 3;
- optional Label label = 4;
-
- // If type_name is set, this need not be set. If both this and type_name
- // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
- optional Type type = 5;
-
- // For message and enum types, this is the name of the type. If the name
- // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
- // rules are used to find the type (i.e. first the nested types within this
- // message are searched, then within the parent, on up to the root
- // namespace).
- optional string type_name = 6;
-
- // For extensions, this is the name of the type being extended. It is
- // resolved in the same manner as type_name.
- optional string extendee = 2;
-
- // For numeric types, contains the original text representation of the value.
- // For booleans, "true" or "false".
- // For strings, contains the default text contents (not escaped in any way).
- // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
- // TODO(kenton): Base-64 encode?
- optional string default_value = 7;
-
- // If set, gives the index of a oneof in the containing type's oneof_decl
- // list. This field is a member of that oneof.
- optional int32 oneof_index = 9;
-
- // JSON name of this field. The value is set by protocol compiler. If the
- // user has set a "json_name" option on this field, that option's value
- // will be used. Otherwise, it's deduced from the field's name by converting
- // it to camelCase.
- optional string json_name = 10;
-
- optional FieldOptions options = 8;
-}
-
-// Describes a oneof.
-message OneofDescriptorProto {
- optional string name = 1;
- optional OneofOptions options = 2;
-}
-
-// Describes an enum type.
-message EnumDescriptorProto {
- optional string name = 1;
-
- repeated EnumValueDescriptorProto value = 2;
-
- optional EnumOptions options = 3;
-
- // Range of reserved numeric values. Reserved values may not be used by
- // entries in the same enum. Reserved ranges may not overlap.
- //
- // Note that this is distinct from DescriptorProto.ReservedRange in that it
- // is inclusive such that it can appropriately represent the entire int32
- // domain.
- message EnumReservedRange {
- optional int32 start = 1; // Inclusive.
- optional int32 end = 2; // Inclusive.
- }
-
- // Range of reserved numeric values. Reserved numeric values may not be used
- // by enum values in the same enum declaration. Reserved ranges may not
- // overlap.
- repeated EnumReservedRange reserved_range = 4;
-
- // Reserved enum value names, which may not be reused. A given name may only
- // be reserved once.
- repeated string reserved_name = 5;
-}
-
-// Describes a value within an enum.
-message EnumValueDescriptorProto {
- optional string name = 1;
- optional int32 number = 2;
-
- optional EnumValueOptions options = 3;
-}
-
-// Describes a service.
-message ServiceDescriptorProto {
- optional string name = 1;
- repeated MethodDescriptorProto method = 2;
-
- optional ServiceOptions options = 3;
-}
-
-// Describes a method of a service.
-message MethodDescriptorProto {
- optional string name = 1;
-
- // Input and output type names. These are resolved in the same way as
- // FieldDescriptorProto.type_name, but must refer to a message type.
- optional string input_type = 2;
- optional string output_type = 3;
-
- optional MethodOptions options = 4;
-
- // Identifies if client streams multiple client messages
- optional bool client_streaming = 5 [default=false];
- // Identifies if server streams multiple server messages
- optional bool server_streaming = 6 [default=false];
-}
-
-
-// ===================================================================
-// Options
-
-// Each of the definitions above may have "options" attached. These are
-// just annotations which may cause code to be generated slightly differently
-// or may contain hints for code that manipulates protocol messages.
-//
-// Clients may define custom options as extensions of the *Options messages.
-// These extensions may not yet be known at parsing time, so the parser cannot
-// store the values in them. Instead it stores them in a field in the *Options
-// message called uninterpreted_option. This field must have the same name
-// across all *Options messages. We then use this field to populate the
-// extensions when we build a descriptor, at which point all protos have been
-// parsed and so all extensions are known.
-//
-// Extension numbers for custom options may be chosen as follows:
-// * For options which will only be used within a single application or
-// organization, or for experimental options, use field numbers 50000
-// through 99999. It is up to you to ensure that you do not use the
-// same number for multiple options.
-// * For options which will be published and used publicly by multiple
-// independent entities, e-mail protobuf-global-extension-registry@google.com
-// to reserve extension numbers. Simply provide your project name (e.g.
-// Objective-C plugin) and your project website (if available) -- there's no
-// need to explain how you intend to use them. Usually you only need one
-// extension number. You can declare multiple options with only one extension
-// number by putting them in a sub-message. See the Custom Options section of
-// the docs for examples:
-// https://developers.google.com/protocol-buffers/docs/proto#options
-// If this turns out to be popular, a web service will be set up
-// to automatically assign option numbers.
-
-
-message FileOptions {
-
- // Sets the Java package where classes generated from this .proto will be
- // placed. By default, the proto package is used, but this is often
- // inappropriate because proto packages do not normally start with backwards
- // domain names.
- optional string java_package = 1;
-
-
- // If set, all the classes from the .proto file are wrapped in a single
- // outer class with the given name. This applies to both Proto1
- // (equivalent to the old "--one_java_file" option) and Proto2 (where
- // a .proto always translates to a single class, but you may want to
- // explicitly choose the class name).
- optional string java_outer_classname = 8;
-
- // If set true, then the Java code generator will generate a separate .java
- // file for each top-level message, enum, and service defined in the .proto
- // file. Thus, these types will *not* be nested inside the outer class
- // named by java_outer_classname. However, the outer class will still be
- // generated to contain the file's getDescriptor() method as well as any
- // top-level extensions defined in the file.
- optional bool java_multiple_files = 10 [default=false];
-
- // This option does nothing.
- optional bool java_generate_equals_and_hash = 20 [deprecated=true];
-
- // If set true, then the Java2 code generator will generate code that
- // throws an exception whenever an attempt is made to assign a non-UTF-8
- // byte sequence to a string field.
- // Message reflection will do the same.
- // However, an extension field still accepts non-UTF-8 byte sequences.
- // This option has no effect on when used with the lite runtime.
- optional bool java_string_check_utf8 = 27 [default=false];
-
-
- // Generated classes can be optimized for speed or code size.
- enum OptimizeMode {
- SPEED = 1; // Generate complete code for parsing, serialization,
- // etc.
- CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
- LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
- }
- optional OptimizeMode optimize_for = 9 [default=SPEED];
-
- // Sets the Go package where structs generated from this .proto will be
- // placed. If omitted, the Go package will be derived from the following:
- // - The basename of the package import path, if provided.
- // - Otherwise, the package statement in the .proto file, if present.
- // - Otherwise, the basename of the .proto file, without extension.
- optional string go_package = 11;
-
-
-
- // Should generic services be generated in each language? "Generic" services
- // are not specific to any particular RPC system. They are generated by the
- // main code generators in each language (without additional plugins).
- // Generic services were the only kind of service generation supported by
- // early versions of google.protobuf.
- //
- // Generic services are now considered deprecated in favor of using plugins
- // that generate code specific to your particular RPC system. Therefore,
- // these default to false. Old code which depends on generic services should
- // explicitly set them to true.
- optional bool cc_generic_services = 16 [default=false];
- optional bool java_generic_services = 17 [default=false];
- optional bool py_generic_services = 18 [default=false];
- optional bool php_generic_services = 42 [default=false];
-
- // Is this file deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for everything in the file, or it will be completely ignored; in the very
- // least, this is a formalization for deprecating files.
- optional bool deprecated = 23 [default=false];
-
- // Enables the use of arenas for the proto messages in this file. This applies
- // only to generated classes for C++.
- optional bool cc_enable_arenas = 31 [default=false];
-
-
- // Sets the objective c class prefix which is prepended to all objective c
- // generated classes from this .proto. There is no default.
- optional string objc_class_prefix = 36;
-
- // Namespace for generated classes; defaults to the package.
- optional string csharp_namespace = 37;
-
- // By default Swift generators will take the proto package and CamelCase it
- // replacing '.' with underscore and use that to prefix the types/symbols
- // defined. When this options is provided, they will use this value instead
- // to prefix the types/symbols defined.
- optional string swift_prefix = 39;
-
- // Sets the php class prefix which is prepended to all php generated classes
- // from this .proto. Default is empty.
- optional string php_class_prefix = 40;
-
- // Use this option to change the namespace of php generated classes. Default
- // is empty. When this option is empty, the package name will be used for
- // determining the namespace.
- optional string php_namespace = 41;
-
-
- // Use this option to change the namespace of php generated metadata classes.
- // Default is empty. When this option is empty, the proto file name will be used
- // for determining the namespace.
- optional string php_metadata_namespace = 44;
-
- // Use this option to change the package of ruby generated classes. Default
- // is empty. When this option is not set, the package name will be used for
- // determining the ruby package.
- optional string ruby_package = 45;
-
- // The parser stores options it doesn't recognize here.
- // See the documentation for the "Options" section above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message.
- // See the documentation for the "Options" section above.
- extensions 1000 to max;
-
- reserved 38;
-}
-
-message MessageOptions {
- // Set true to use the old proto1 MessageSet wire format for extensions.
- // This is provided for backwards-compatibility with the MessageSet wire
- // format. You should not use this for any other reason: It's less
- // efficient, has fewer features, and is more complicated.
- //
- // The message must be defined exactly as follows:
- // message Foo {
- // option message_set_wire_format = true;
- // extensions 4 to max;
- // }
- // Note that the message cannot have any defined fields; MessageSets only
- // have extensions.
- //
- // All extensions of your type must be singular messages; e.g. they cannot
- // be int32s, enums, or repeated messages.
- //
- // Because this is an option, the above two restrictions are not enforced by
- // the protocol compiler.
- optional bool message_set_wire_format = 1 [default=false];
-
- // Disables the generation of the standard "descriptor()" accessor, which can
- // conflict with a field of the same name. This is meant to make migration
- // from proto1 easier; new code should avoid fields named "descriptor".
- optional bool no_standard_descriptor_accessor = 2 [default=false];
-
- // Is this message deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the message, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating messages.
- optional bool deprecated = 3 [default=false];
-
- // Whether the message is an automatically generated map entry type for the
- // maps field.
- //
- // For maps fields:
- // map map_field = 1;
- // The parsed descriptor looks like:
- // message MapFieldEntry {
- // option map_entry = true;
- // optional KeyType key = 1;
- // optional ValueType value = 2;
- // }
- // repeated MapFieldEntry map_field = 1;
- //
- // Implementations may choose not to generate the map_entry=true message, but
- // use a native map in the target language to hold the keys and values.
- // The reflection APIs in such implementions still need to work as
- // if the field is a repeated message field.
- //
- // NOTE: Do not set the option in .proto files. Always use the maps syntax
- // instead. The option should only be implicitly set by the proto compiler
- // parser.
- optional bool map_entry = 7;
-
- reserved 8; // javalite_serializable
- reserved 9; // javanano_as_lite
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message FieldOptions {
- // The ctype option instructs the C++ code generator to use a different
- // representation of the field than it normally would. See the specific
- // options below. This option is not yet implemented in the open source
- // release -- sorry, we'll try to include it in a future version!
- optional CType ctype = 1 [default = STRING];
- enum CType {
- // Default mode.
- STRING = 0;
-
- CORD = 1;
-
- STRING_PIECE = 2;
- }
- // The packed option can be enabled for repeated primitive fields to enable
- // a more efficient representation on the wire. Rather than repeatedly
- // writing the tag and type for each element, the entire array is encoded as
- // a single length-delimited blob. In proto3, only explicit setting it to
- // false will avoid using packed encoding.
- optional bool packed = 2;
-
- // The jstype option determines the JavaScript type used for values of the
- // field. The option is permitted only for 64 bit integral and fixed types
- // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
- // is represented as JavaScript string, which avoids loss of precision that
- // can happen when a large value is converted to a floating point JavaScript.
- // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
- // use the JavaScript "number" type. The behavior of the default option
- // JS_NORMAL is implementation dependent.
- //
- // This option is an enum to permit additional types to be added, e.g.
- // goog.math.Integer.
- optional JSType jstype = 6 [default = JS_NORMAL];
- enum JSType {
- // Use the default type.
- JS_NORMAL = 0;
-
- // Use JavaScript strings.
- JS_STRING = 1;
-
- // Use JavaScript numbers.
- JS_NUMBER = 2;
- }
-
- // Should this field be parsed lazily? Lazy applies only to message-type
- // fields. It means that when the outer message is initially parsed, the
- // inner message's contents will not be parsed but instead stored in encoded
- // form. The inner message will actually be parsed when it is first accessed.
- //
- // This is only a hint. Implementations are free to choose whether to use
- // eager or lazy parsing regardless of the value of this option. However,
- // setting this option true suggests that the protocol author believes that
- // using lazy parsing on this field is worth the additional bookkeeping
- // overhead typically needed to implement it.
- //
- // This option does not affect the public interface of any generated code;
- // all method signatures remain the same. Furthermore, thread-safety of the
- // interface is not affected by this option; const methods remain safe to
- // call from multiple threads concurrently, while non-const methods continue
- // to require exclusive access.
- //
- //
- // Note that implementations may choose not to check required fields within
- // a lazy sub-message. That is, calling IsInitialized() on the outer message
- // may return true even if the inner message has missing required fields.
- // This is necessary because otherwise the inner message would have to be
- // parsed in order to perform the check, defeating the purpose of lazy
- // parsing. An implementation which chooses not to check required fields
- // must be consistent about it. That is, for any particular sub-message, the
- // implementation must either *always* check its required fields, or *never*
- // check its required fields, regardless of whether or not the message has
- // been parsed.
- optional bool lazy = 5 [default=false];
-
- // Is this field deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for accessors, or it will be completely ignored; in the very least, this
- // is a formalization for deprecating fields.
- optional bool deprecated = 3 [default=false];
-
- // For Google-internal migration only. Do not use.
- optional bool weak = 10 [default=false];
-
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-
- reserved 4; // removed jtype
-}
-
-message OneofOptions {
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message EnumOptions {
-
- // Set this option to true to allow mapping different tag names to the same
- // value.
- optional bool allow_alias = 2;
-
- // Is this enum deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the enum, or it will be completely ignored; in the very least, this
- // is a formalization for deprecating enums.
- optional bool deprecated = 3 [default=false];
-
- reserved 5; // javanano_as_lite
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message EnumValueOptions {
- // Is this enum value deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the enum value, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating enum values.
- optional bool deprecated = 1 [default=false];
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message ServiceOptions {
-
- // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
- // framework. We apologize for hoarding these numbers to ourselves, but
- // we were already using them long before we decided to release Protocol
- // Buffers.
-
- // Is this service deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the service, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating services.
- optional bool deprecated = 33 [default=false];
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-message MethodOptions {
-
- // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
- // framework. We apologize for hoarding these numbers to ourselves, but
- // we were already using them long before we decided to release Protocol
- // Buffers.
-
- // Is this method deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the method, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating methods.
- optional bool deprecated = 33 [default=false];
-
- // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
- // or neither? HTTP based RPC implementation may choose GET verb for safe
- // methods, and PUT verb for idempotent methods instead of the default POST.
- enum IdempotencyLevel {
- IDEMPOTENCY_UNKNOWN = 0;
- NO_SIDE_EFFECTS = 1; // implies idempotent
- IDEMPOTENT = 2; // idempotent, but may have side effects
- }
- optional IdempotencyLevel idempotency_level =
- 34 [default=IDEMPOTENCY_UNKNOWN];
-
- // The parser stores options it doesn't recognize here. See above.
- repeated UninterpretedOption uninterpreted_option = 999;
-
- // Clients can define custom options in extensions of this message. See above.
- extensions 1000 to max;
-}
-
-
-// A message representing a option the parser does not recognize. This only
-// appears in options protos created by the compiler::Parser class.
-// DescriptorPool resolves these when building Descriptor objects. Therefore,
-// options protos in descriptor objects (e.g. returned by Descriptor::options(),
-// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
-// in them.
-message UninterpretedOption {
- // The name of the uninterpreted option. Each string represents a segment in
- // a dot-separated name. is_extension is true iff a segment represents an
- // extension (denoted with parentheses in options specs in .proto files).
- // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
- // "foo.(bar.baz).qux".
- message NamePart {
- required string name_part = 1;
- required bool is_extension = 2;
- }
- repeated NamePart name = 2;
-
- // The value of the uninterpreted option, in whatever type the tokenizer
- // identified it as during parsing. Exactly one of these should be set.
- optional string identifier_value = 3;
- optional uint64 positive_int_value = 4;
- optional int64 negative_int_value = 5;
- optional double double_value = 6;
- optional bytes string_value = 7;
- optional string aggregate_value = 8;
-}
-
-// ===================================================================
-// Optional source code info
-
-// Encapsulates information about the original source file from which a
-// FileDescriptorProto was generated.
-message SourceCodeInfo {
- // A Location identifies a piece of source code in a .proto file which
- // corresponds to a particular definition. This information is intended
- // to be useful to IDEs, code indexers, documentation generators, and similar
- // tools.
- //
- // For example, say we have a file like:
- // message Foo {
- // optional string foo = 1;
- // }
- // Let's look at just the field definition:
- // optional string foo = 1;
- // ^ ^^ ^^ ^ ^^^
- // a bc de f ghi
- // We have the following locations:
- // span path represents
- // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
- // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
- // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
- // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
- //
- // Notes:
- // - A location may refer to a repeated field itself (i.e. not to any
- // particular index within it). This is used whenever a set of elements are
- // logically enclosed in a single code segment. For example, an entire
- // extend block (possibly containing multiple extension definitions) will
- // have an outer location whose path refers to the "extensions" repeated
- // field without an index.
- // - Multiple locations may have the same path. This happens when a single
- // logical declaration is spread out across multiple places. The most
- // obvious example is the "extend" block again -- there may be multiple
- // extend blocks in the same scope, each of which will have the same path.
- // - A location's span is not always a subset of its parent's span. For
- // example, the "extendee" of an extension declaration appears at the
- // beginning of the "extend" block and is shared by all extensions within
- // the block.
- // - Just because a location's span is a subset of some other location's span
- // does not mean that it is a descendent. For example, a "group" defines
- // both a type and a field in a single declaration. Thus, the locations
- // corresponding to the type and field and their components will overlap.
- // - Code which tries to interpret locations should probably be designed to
- // ignore those that it doesn't understand, as more types of locations could
- // be recorded in the future.
- repeated Location location = 1;
- message Location {
- // Identifies which part of the FileDescriptorProto was defined at this
- // location.
- //
- // Each element is a field number or an index. They form a path from
- // the root FileDescriptorProto to the place where the definition. For
- // example, this path:
- // [ 4, 3, 2, 7, 1 ]
- // refers to:
- // file.message_type(3) // 4, 3
- // .field(7) // 2, 7
- // .name() // 1
- // This is because FileDescriptorProto.message_type has field number 4:
- // repeated DescriptorProto message_type = 4;
- // and DescriptorProto.field has field number 2:
- // repeated FieldDescriptorProto field = 2;
- // and FieldDescriptorProto.name has field number 1:
- // optional string name = 1;
- //
- // Thus, the above path gives the location of a field name. If we removed
- // the last element:
- // [ 4, 3, 2, 7 ]
- // this path refers to the whole field declaration (from the beginning
- // of the label to the terminating semicolon).
- repeated int32 path = 1 [packed=true];
-
- // Always has exactly three or four elements: start line, start column,
- // end line (optional, otherwise assumed same as start line), end column.
- // These are packed into a single field for efficiency. Note that line
- // and column numbers are zero-based -- typically you will want to add
- // 1 to each before displaying to a user.
- repeated int32 span = 2 [packed=true];
-
- // If this SourceCodeInfo represents a complete declaration, these are any
- // comments appearing before and after the declaration which appear to be
- // attached to the declaration.
- //
- // A series of line comments appearing on consecutive lines, with no other
- // tokens appearing on those lines, will be treated as a single comment.
- //
- // leading_detached_comments will keep paragraphs of comments that appear
- // before (but not connected to) the current element. Each paragraph,
- // separated by empty lines, will be one comment element in the repeated
- // field.
- //
- // Only the comment content is provided; comment markers (e.g. //) are
- // stripped out. For block comments, leading whitespace and an asterisk
- // will be stripped from the beginning of each line other than the first.
- // Newlines are included in the output.
- //
- // Examples:
- //
- // optional int32 foo = 1; // Comment attached to foo.
- // // Comment attached to bar.
- // optional int32 bar = 2;
- //
- // optional string baz = 3;
- // // Comment attached to baz.
- // // Another line attached to baz.
- //
- // // Comment attached to qux.
- // //
- // // Another line attached to qux.
- // optional double qux = 4;
- //
- // // Detached comment for corge. This is not leading or trailing comments
- // // to qux or corge because there are blank lines separating it from
- // // both.
- //
- // // Detached comment for corge paragraph 2.
- //
- // optional string corge = 5;
- // /* Block comment attached
- // * to corge. Leading asterisks
- // * will be removed. */
- // /* Block comment attached to
- // * grault. */
- // optional int32 grault = 6;
- //
- // // ignored detached comments.
- optional string leading_comments = 3;
- optional string trailing_comments = 4;
- repeated string leading_detached_comments = 6;
- }
-}
-
-// Describes the relationship between generated code and its original source
-// file. A GeneratedCodeInfo message is associated with only one generated
-// source file, but may contain references to different source .proto files.
-message GeneratedCodeInfo {
- // An Annotation connects some span of text in generated code to an element
- // of its generating .proto file.
- repeated Annotation annotation = 1;
- message Annotation {
- // Identifies the element in the original source .proto file. This field
- // is formatted the same as SourceCodeInfo.Location.path.
- repeated int32 path = 1 [packed=true];
-
- // Identifies the filesystem path to the original source .proto.
- optional string source_file = 2;
-
- // Identifies the starting offset in bytes in the generated code
- // that relates to the identified object.
- optional int32 begin = 3;
-
- // Identifies the ending offset in bytes in the generated code that
- // relates to the identified offset. The end offset should be one past
- // the last relevant byte (so the length of the text = end - begin).
- optional int32 end = 4;
- }
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
deleted file mode 100644
index 6f4a902b5..000000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
+++ /dev/null
@@ -1,2806 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
- The code generator for the plugin for the Google protocol buffer compiler.
- It generates Go code from the protocol buffer description files read by the
- main routine.
-*/
-package generator
-
-import (
- "bufio"
- "bytes"
- "compress/gzip"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/printer"
- "go/token"
- "log"
- "os"
- "path"
- "sort"
- "strconv"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
-
- "github.com/golang/protobuf/protoc-gen-go/descriptor"
- plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
-)
-
-// generatedCodeVersion indicates a version of the generated code.
-// It is incremented whenever an incompatibility between the generated code and
-// proto package is introduced; the generated code references
-// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
-const generatedCodeVersion = 3
-
-// A Plugin provides functionality to add to the output during Go code generation,
-// such as to produce RPC stubs.
-type Plugin interface {
- // Name identifies the plugin.
- Name() string
- // Init is called once after data structures are built but before
- // code generation begins.
- Init(g *Generator)
- // Generate produces the code generated by the plugin for this file,
- // except for the imports, by calling the generator's methods P, In, and Out.
- Generate(file *FileDescriptor)
- // GenerateImports produces the import declarations for this file.
- // It is called after Generate.
- GenerateImports(file *FileDescriptor)
-}
-
-var plugins []Plugin
-
-// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
-// It is typically called during initialization.
-func RegisterPlugin(p Plugin) {
- plugins = append(plugins, p)
-}
-
-// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf".
-type GoImportPath string
-
-func (p GoImportPath) String() string { return strconv.Quote(string(p)) }
-
-// A GoPackageName is the name of a Go package. e.g., "protobuf".
-type GoPackageName string
-
-// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
-// a pointer to the FileDescriptorProto that represents it. These types achieve that
-// wrapping by placing each Proto inside a struct with the pointer to its File. The
-// structs have the same names as their contents, with "Proto" removed.
-// FileDescriptor is used to store the things that it points to.
-
-// The file and package name method are common to messages and enums.
-type common struct {
- file *FileDescriptor // File this object comes from.
-}
-
-// GoImportPath is the import path of the Go package containing the type.
-func (c *common) GoImportPath() GoImportPath {
- return c.file.importPath
-}
-
-func (c *common) File() *FileDescriptor { return c.file }
-
-func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
- return file.GetSyntax() == "proto3"
-}
-
-func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) }
-
-// Descriptor represents a protocol buffer message.
-type Descriptor struct {
- common
- *descriptor.DescriptorProto
- parent *Descriptor // The containing message, if any.
- nested []*Descriptor // Inner messages, if any.
- enums []*EnumDescriptor // Inner enums, if any.
- ext []*ExtensionDescriptor // Extensions, if any.
- typename []string // Cached typename vector.
- index int // The index into the container, whether the file or another message.
- path string // The SourceCodeInfo path as comma-separated integers.
- group bool
-}
-
-// TypeName returns the elements of the dotted type name.
-// The package name is not part of this name.
-func (d *Descriptor) TypeName() []string {
- if d.typename != nil {
- return d.typename
- }
- n := 0
- for parent := d; parent != nil; parent = parent.parent {
- n++
- }
- s := make([]string, n)
- for parent := d; parent != nil; parent = parent.parent {
- n--
- s[n] = parent.GetName()
- }
- d.typename = s
- return s
-}
-
-// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
-// Otherwise it will be the descriptor of the message in which it is defined.
-type EnumDescriptor struct {
- common
- *descriptor.EnumDescriptorProto
- parent *Descriptor // The containing message, if any.
- typename []string // Cached typename vector.
- index int // The index into the container, whether the file or a message.
- path string // The SourceCodeInfo path as comma-separated integers.
-}
-
-// TypeName returns the elements of the dotted type name.
-// The package name is not part of this name.
-func (e *EnumDescriptor) TypeName() (s []string) {
- if e.typename != nil {
- return e.typename
- }
- name := e.GetName()
- if e.parent == nil {
- s = make([]string, 1)
- } else {
- pname := e.parent.TypeName()
- s = make([]string, len(pname)+1)
- copy(s, pname)
- }
- s[len(s)-1] = name
- e.typename = s
- return s
-}
-
-// Everything but the last element of the full type name, CamelCased.
-// The values of type Foo.Bar are call Foo_value1... not Foo_Bar_value1... .
-func (e *EnumDescriptor) prefix() string {
- if e.parent == nil {
- // If the enum is not part of a message, the prefix is just the type name.
- return CamelCase(*e.Name) + "_"
- }
- typeName := e.TypeName()
- return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
-}
-
-// The integer value of the named constant in this enumerated type.
-func (e *EnumDescriptor) integerValueAsString(name string) string {
- for _, c := range e.Value {
- if c.GetName() == name {
- return fmt.Sprint(c.GetNumber())
- }
- }
- log.Fatal("cannot find value for enum constant")
- return ""
-}
-
-// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
-// Otherwise it will be the descriptor of the message in which it is defined.
-type ExtensionDescriptor struct {
- common
- *descriptor.FieldDescriptorProto
- parent *Descriptor // The containing message, if any.
-}
-
-// TypeName returns the elements of the dotted type name.
-// The package name is not part of this name.
-func (e *ExtensionDescriptor) TypeName() (s []string) {
- name := e.GetName()
- if e.parent == nil {
- // top-level extension
- s = make([]string, 1)
- } else {
- pname := e.parent.TypeName()
- s = make([]string, len(pname)+1)
- copy(s, pname)
- }
- s[len(s)-1] = name
- return s
-}
-
-// DescName returns the variable name used for the generated descriptor.
-func (e *ExtensionDescriptor) DescName() string {
- // The full type name.
- typeName := e.TypeName()
- // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
- for i, s := range typeName {
- typeName[i] = CamelCase(s)
- }
- return "E_" + strings.Join(typeName, "_")
-}
-
-// ImportedDescriptor describes a type that has been publicly imported from another file.
-type ImportedDescriptor struct {
- common
- o Object
-}
-
-func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
-
-// FileDescriptor describes an protocol buffer descriptor file (.proto).
-// It includes slices of all the messages and enums defined within it.
-// Those slices are constructed by WrapTypes.
-type FileDescriptor struct {
- *descriptor.FileDescriptorProto
- desc []*Descriptor // All the messages defined in this file.
- enum []*EnumDescriptor // All the enums defined in this file.
- ext []*ExtensionDescriptor // All the top-level extensions defined in this file.
- imp []*ImportedDescriptor // All types defined in files publicly imported by this file.
-
- // Comments, stored as a map of path (comma-separated integers) to the comment.
- comments map[string]*descriptor.SourceCodeInfo_Location
-
- // The full list of symbols that are exported,
- // as a map from the exported object to its symbols.
- // This is used for supporting public imports.
- exported map[Object][]symbol
-
- importPath GoImportPath // Import path of this file's package.
- packageName GoPackageName // Name of this file's Go package.
-
- proto3 bool // whether to generate proto3 code for this file
-}
-
-// VarName is the variable name we'll use in the generated code to refer
-// to the compressed bytes of this descriptor. It is not exported, so
-// it is only valid inside the generated package.
-func (d *FileDescriptor) VarName() string {
- h := sha256.Sum256([]byte(d.GetName()))
- return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8]))
-}
-
-// goPackageOption interprets the file's go_package option.
-// If there is no go_package, it returns ("", "", false).
-// If there's a simple name, it returns ("", pkg, true).
-// If the option implies an import path, it returns (impPath, pkg, true).
-func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) {
- opt := d.GetOptions().GetGoPackage()
- if opt == "" {
- return "", "", false
- }
- // A semicolon-delimited suffix delimits the import path and package name.
- sc := strings.Index(opt, ";")
- if sc >= 0 {
- return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true
- }
- // The presence of a slash implies there's an import path.
- slash := strings.LastIndex(opt, "/")
- if slash >= 0 {
- return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true
- }
- return "", cleanPackageName(opt), true
-}
-
-// goFileName returns the output name for the generated Go file.
-func (d *FileDescriptor) goFileName(pathType pathType) string {
- name := *d.Name
- if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" {
- name = name[:len(name)-len(ext)]
- }
- name += ".pb.go"
-
- if pathType == pathTypeSourceRelative {
- return name
- }
-
- // Does the file have a "go_package" option?
- // If it does, it may override the filename.
- if impPath, _, ok := d.goPackageOption(); ok && impPath != "" {
- // Replace the existing dirname with the declared import path.
- _, name = path.Split(name)
- name = path.Join(string(impPath), name)
- return name
- }
-
- return name
-}
-
-func (d *FileDescriptor) addExport(obj Object, sym symbol) {
- d.exported[obj] = append(d.exported[obj], sym)
-}
-
-// symbol is an interface representing an exported Go symbol.
-type symbol interface {
- // GenerateAlias should generate an appropriate alias
- // for the symbol from the named package.
- GenerateAlias(g *Generator, filename string, pkg GoPackageName)
-}
-
-type messageSymbol struct {
- sym string
- hasExtensions, isMessageSet bool
- oneofTypes []string
-}
-
-type getterSymbol struct {
- name string
- typ string
- typeName string // canonical name in proto world; empty for proto.Message and similar
- genType bool // whether typ contains a generated type (message/group/enum)
-}
-
-func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
- g.P("// ", ms.sym, " from public import ", filename)
- g.P("type ", ms.sym, " = ", pkg, ".", ms.sym)
- for _, name := range ms.oneofTypes {
- g.P("type ", name, " = ", pkg, ".", name)
- }
-}
-
-type enumSymbol struct {
- name string
- proto3 bool // Whether this came from a proto3 file.
-}
-
-func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
- s := es.name
- g.P("// ", s, " from public import ", filename)
- g.P("type ", s, " = ", pkg, ".", s)
- g.P("var ", s, "_name = ", pkg, ".", s, "_name")
- g.P("var ", s, "_value = ", pkg, ".", s, "_value")
-}
-
-type constOrVarSymbol struct {
- sym string
- typ string // either "const" or "var"
- cast string // if non-empty, a type cast is required (used for enums)
-}
-
-func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
- v := string(pkg) + "." + cs.sym
- if cs.cast != "" {
- v = cs.cast + "(" + v + ")"
- }
- g.P(cs.typ, " ", cs.sym, " = ", v)
-}
-
-// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects.
-type Object interface {
- GoImportPath() GoImportPath
- TypeName() []string
- File() *FileDescriptor
-}
-
-// Generator is the type whose methods generate the output, stored in the associated response structure.
-type Generator struct {
- *bytes.Buffer
-
- Request *plugin.CodeGeneratorRequest // The input.
- Response *plugin.CodeGeneratorResponse // The output.
-
- Param map[string]string // Command-line parameters.
- PackageImportPath string // Go import path of the package we're generating code for
- ImportPrefix string // String to prefix to imported package file names.
- ImportMap map[string]string // Mapping from .proto file name to import path
-
- Pkg map[string]string // The names under which we import support packages
-
- outputImportPath GoImportPath // Package we're generating code for.
- allFiles []*FileDescriptor // All files in the tree
- allFilesByName map[string]*FileDescriptor // All files by filename.
- genFiles []*FileDescriptor // Those files we will generate output for.
- file *FileDescriptor // The file we are compiling now.
- packageNames map[GoImportPath]GoPackageName // Imported package names in the current file.
- usedPackages map[GoImportPath]bool // Packages used in current file.
- usedPackageNames map[GoPackageName]bool // Package names used in the current file.
- addedImports map[GoImportPath]bool // Additional imports to emit.
- typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax.
- init []string // Lines to emit in the init function.
- indent string
- pathType pathType // How to generate output filenames.
- writeOutput bool
- annotateCode bool // whether to store annotations
- annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store
-}
-
-type pathType int
-
-const (
- pathTypeImport pathType = iota
- pathTypeSourceRelative
-)
-
-// New creates a new generator and allocates the request and response protobufs.
-func New() *Generator {
- g := new(Generator)
- g.Buffer = new(bytes.Buffer)
- g.Request = new(plugin.CodeGeneratorRequest)
- g.Response = new(plugin.CodeGeneratorResponse)
- return g
-}
-
-// Error reports a problem, including an error, and exits the program.
-func (g *Generator) Error(err error, msgs ...string) {
- s := strings.Join(msgs, " ") + ":" + err.Error()
- log.Print("protoc-gen-go: error:", s)
- os.Exit(1)
-}
-
-// Fail reports a problem and exits the program.
-func (g *Generator) Fail(msgs ...string) {
- s := strings.Join(msgs, " ")
- log.Print("protoc-gen-go: error:", s)
- os.Exit(1)
-}
-
-// CommandLineParameters breaks the comma-separated list of key=value pairs
-// in the parameter (a member of the request protobuf) into a key/value map.
-// It then sets file name mappings defined by those entries.
-func (g *Generator) CommandLineParameters(parameter string) {
- g.Param = make(map[string]string)
- for _, p := range strings.Split(parameter, ",") {
- if i := strings.Index(p, "="); i < 0 {
- g.Param[p] = ""
- } else {
- g.Param[p[0:i]] = p[i+1:]
- }
- }
-
- g.ImportMap = make(map[string]string)
- pluginList := "none" // Default list of plugin names to enable (empty means all).
- for k, v := range g.Param {
- switch k {
- case "import_prefix":
- g.ImportPrefix = v
- case "import_path":
- g.PackageImportPath = v
- case "paths":
- switch v {
- case "import":
- g.pathType = pathTypeImport
- case "source_relative":
- g.pathType = pathTypeSourceRelative
- default:
- g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v))
- }
- case "plugins":
- pluginList = v
- case "annotate_code":
- if v == "true" {
- g.annotateCode = true
- }
- default:
- if len(k) > 0 && k[0] == 'M' {
- g.ImportMap[k[1:]] = v
- }
- }
- }
- if pluginList != "" {
- // Amend the set of plugins.
- enabled := make(map[string]bool)
- for _, name := range strings.Split(pluginList, "+") {
- enabled[name] = true
- }
- var nplugins []Plugin
- for _, p := range plugins {
- if enabled[p.Name()] {
- nplugins = append(nplugins, p)
- }
- }
- plugins = nplugins
- }
-}
-
-// DefaultPackageName returns the package name printed for the object.
-// If its file is in a different package, it returns the package name we're using for this file, plus ".".
-// Otherwise it returns the empty string.
-func (g *Generator) DefaultPackageName(obj Object) string {
- importPath := obj.GoImportPath()
- if importPath == g.outputImportPath {
- return ""
- }
- return string(g.GoPackageName(importPath)) + "."
-}
-
-// GoPackageName returns the name used for a package.
-func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName {
- if name, ok := g.packageNames[importPath]; ok {
- return name
- }
- name := cleanPackageName(baseName(string(importPath)))
- for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ {
- name = orig + GoPackageName(strconv.Itoa(i))
- }
- g.packageNames[importPath] = name
- g.usedPackageNames[name] = true
- return name
-}
-
-// AddImport adds a package to the generated file's import section.
-// It returns the name used for the package.
-func (g *Generator) AddImport(importPath GoImportPath) GoPackageName {
- g.addedImports[importPath] = true
- return g.GoPackageName(importPath)
-}
-
-var globalPackageNames = map[GoPackageName]bool{
- "fmt": true,
- "math": true,
- "proto": true,
-}
-
-// Create and remember a guaranteed unique package name. Pkg is the candidate name.
-// The FileDescriptor parameter is unused.
-func RegisterUniquePackageName(pkg string, f *FileDescriptor) string {
- name := cleanPackageName(pkg)
- for i, orig := 1, name; globalPackageNames[name]; i++ {
- name = orig + GoPackageName(strconv.Itoa(i))
- }
- globalPackageNames[name] = true
- return string(name)
-}
-
-var isGoKeyword = map[string]bool{
- "break": true,
- "case": true,
- "chan": true,
- "const": true,
- "continue": true,
- "default": true,
- "else": true,
- "defer": true,
- "fallthrough": true,
- "for": true,
- "func": true,
- "go": true,
- "goto": true,
- "if": true,
- "import": true,
- "interface": true,
- "map": true,
- "package": true,
- "range": true,
- "return": true,
- "select": true,
- "struct": true,
- "switch": true,
- "type": true,
- "var": true,
-}
-
-var isGoPredeclaredIdentifier = map[string]bool{
- "append": true,
- "bool": true,
- "byte": true,
- "cap": true,
- "close": true,
- "complex": true,
- "complex128": true,
- "complex64": true,
- "copy": true,
- "delete": true,
- "error": true,
- "false": true,
- "float32": true,
- "float64": true,
- "imag": true,
- "int": true,
- "int16": true,
- "int32": true,
- "int64": true,
- "int8": true,
- "iota": true,
- "len": true,
- "make": true,
- "new": true,
- "nil": true,
- "panic": true,
- "print": true,
- "println": true,
- "real": true,
- "recover": true,
- "rune": true,
- "string": true,
- "true": true,
- "uint": true,
- "uint16": true,
- "uint32": true,
- "uint64": true,
- "uint8": true,
- "uintptr": true,
-}
-
-func cleanPackageName(name string) GoPackageName {
- name = strings.Map(badToUnderscore, name)
- // Identifier must not be keyword or predeclared identifier: insert _.
- if isGoKeyword[name] {
- name = "_" + name
- }
- // Identifier must not begin with digit: insert _.
- if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) {
- name = "_" + name
- }
- return GoPackageName(name)
-}
-
-// defaultGoPackage returns the package name to use,
-// derived from the import path of the package we're building code for.
-func (g *Generator) defaultGoPackage() GoPackageName {
- p := g.PackageImportPath
- if i := strings.LastIndex(p, "/"); i >= 0 {
- p = p[i+1:]
- }
- return cleanPackageName(p)
-}
-
-// SetPackageNames sets the package name for this run.
-// The package name must agree across all files being generated.
-// It also defines unique package names for all imported files.
-func (g *Generator) SetPackageNames() {
- g.outputImportPath = g.genFiles[0].importPath
-
- defaultPackageNames := make(map[GoImportPath]GoPackageName)
- for _, f := range g.genFiles {
- if _, p, ok := f.goPackageOption(); ok {
- defaultPackageNames[f.importPath] = p
- }
- }
- for _, f := range g.genFiles {
- if _, p, ok := f.goPackageOption(); ok {
- // Source file: option go_package = "quux/bar";
- f.packageName = p
- } else if p, ok := defaultPackageNames[f.importPath]; ok {
- // A go_package option in another file in the same package.
- //
- // This is a poor choice in general, since every source file should
- // contain a go_package option. Supported mainly for historical
- // compatibility.
- f.packageName = p
- } else if p := g.defaultGoPackage(); p != "" {
- // Command-line: import_path=quux/bar.
- //
- // The import_path flag sets a package name for files which don't
- // contain a go_package option.
- f.packageName = p
- } else if p := f.GetPackage(); p != "" {
- // Source file: package quux.bar;
- f.packageName = cleanPackageName(p)
- } else {
- // Source filename.
- f.packageName = cleanPackageName(baseName(f.GetName()))
- }
- }
-
- // Check that all files have a consistent package name and import path.
- for _, f := range g.genFiles[1:] {
- if a, b := g.genFiles[0].importPath, f.importPath; a != b {
- g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b))
- }
- if a, b := g.genFiles[0].packageName, f.packageName; a != b {
- g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b))
- }
- }
-
- // Names of support packages. These never vary (if there are conflicts,
- // we rename the conflicting package), so this could be removed someday.
- g.Pkg = map[string]string{
- "fmt": "fmt",
- "math": "math",
- "proto": "proto",
- }
-}
-
-// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos
-// and FileDescriptorProtos into file-referenced objects within the Generator.
-// It also creates the list of files to generate and so should be called before GenerateAllFiles.
-func (g *Generator) WrapTypes() {
- g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile))
- g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles))
- genFileNames := make(map[string]bool)
- for _, n := range g.Request.FileToGenerate {
- genFileNames[n] = true
- }
- for _, f := range g.Request.ProtoFile {
- fd := &FileDescriptor{
- FileDescriptorProto: f,
- exported: make(map[Object][]symbol),
- proto3: fileIsProto3(f),
- }
- // The import path may be set in a number of ways.
- if substitution, ok := g.ImportMap[f.GetName()]; ok {
- // Command-line: M=foo.proto=quux/bar.
- //
- // Explicit mapping of source file to import path.
- fd.importPath = GoImportPath(substitution)
- } else if genFileNames[f.GetName()] && g.PackageImportPath != "" {
- // Command-line: import_path=quux/bar.
- //
- // The import_path flag sets the import path for every file that
- // we generate code for.
- fd.importPath = GoImportPath(g.PackageImportPath)
- } else if p, _, _ := fd.goPackageOption(); p != "" {
- // Source file: option go_package = "quux/bar";
- //
- // The go_package option sets the import path. Most users should use this.
- fd.importPath = p
- } else {
- // Source filename.
- //
- // Last resort when nothing else is available.
- fd.importPath = GoImportPath(path.Dir(f.GetName()))
- }
- // We must wrap the descriptors before we wrap the enums
- fd.desc = wrapDescriptors(fd)
- g.buildNestedDescriptors(fd.desc)
- fd.enum = wrapEnumDescriptors(fd, fd.desc)
- g.buildNestedEnums(fd.desc, fd.enum)
- fd.ext = wrapExtensions(fd)
- extractComments(fd)
- g.allFiles = append(g.allFiles, fd)
- g.allFilesByName[f.GetName()] = fd
- }
- for _, fd := range g.allFiles {
- fd.imp = wrapImported(fd, g)
- }
-
- g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
- for _, fileName := range g.Request.FileToGenerate {
- fd := g.allFilesByName[fileName]
- if fd == nil {
- g.Fail("could not find file named", fileName)
- }
- g.genFiles = append(g.genFiles, fd)
- }
-}
-
-// Scan the descriptors in this file. For each one, build the slice of nested descriptors
-func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
- for _, desc := range descs {
- if len(desc.NestedType) != 0 {
- for _, nest := range descs {
- if nest.parent == desc {
- desc.nested = append(desc.nested, nest)
- }
- }
- if len(desc.nested) != len(desc.NestedType) {
- g.Fail("internal error: nesting failure for", desc.GetName())
- }
- }
- }
-}
-
-func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
- for _, desc := range descs {
- if len(desc.EnumType) != 0 {
- for _, enum := range enums {
- if enum.parent == desc {
- desc.enums = append(desc.enums, enum)
- }
- }
- if len(desc.enums) != len(desc.EnumType) {
- g.Fail("internal error: enum nesting failure for", desc.GetName())
- }
- }
- }
-}
-
-// Construct the Descriptor
-func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor {
- d := &Descriptor{
- common: common{file},
- DescriptorProto: desc,
- parent: parent,
- index: index,
- }
- if parent == nil {
- d.path = fmt.Sprintf("%d,%d", messagePath, index)
- } else {
- d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
- }
-
- // The only way to distinguish a group from a message is whether
- // the containing message has a TYPE_GROUP field that matches.
- if parent != nil {
- parts := d.TypeName()
- if file.Package != nil {
- parts = append([]string{*file.Package}, parts...)
- }
- exp := "." + strings.Join(parts, ".")
- for _, field := range parent.Field {
- if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
- d.group = true
- break
- }
- }
- }
-
- for _, field := range desc.Extension {
- d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
- }
-
- return d
-}
-
-// Return a slice of all the Descriptors defined within this file
-func wrapDescriptors(file *FileDescriptor) []*Descriptor {
- sl := make([]*Descriptor, 0, len(file.MessageType)+10)
- for i, desc := range file.MessageType {
- sl = wrapThisDescriptor(sl, desc, nil, file, i)
- }
- return sl
-}
-
-// Wrap this Descriptor, recursively
-func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor {
- sl = append(sl, newDescriptor(desc, parent, file, index))
- me := sl[len(sl)-1]
- for i, nested := range desc.NestedType {
- sl = wrapThisDescriptor(sl, nested, me, file, i)
- }
- return sl
-}
-
-// Construct the EnumDescriptor
-func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor {
- ed := &EnumDescriptor{
- common: common{file},
- EnumDescriptorProto: desc,
- parent: parent,
- index: index,
- }
- if parent == nil {
- ed.path = fmt.Sprintf("%d,%d", enumPath, index)
- } else {
- ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
- }
- return ed
-}
-
-// Return a slice of all the EnumDescriptors defined within this file
-func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor {
- sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
- // Top-level enums.
- for i, enum := range file.EnumType {
- sl = append(sl, newEnumDescriptor(enum, nil, file, i))
- }
- // Enums within messages. Enums within embedded messages appear in the outer-most message.
- for _, nested := range descs {
- for i, enum := range nested.EnumType {
- sl = append(sl, newEnumDescriptor(enum, nested, file, i))
- }
- }
- return sl
-}
-
-// Return a slice of all the top-level ExtensionDescriptors defined within this file.
-func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor {
- var sl []*ExtensionDescriptor
- for _, field := range file.Extension {
- sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
- }
- return sl
-}
-
-// Return a slice of all the types that are publicly imported into this file.
-func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) {
- for _, index := range file.PublicDependency {
- df := g.fileByName(file.Dependency[index])
- for _, d := range df.desc {
- if d.GetOptions().GetMapEntry() {
- continue
- }
- sl = append(sl, &ImportedDescriptor{common{file}, d})
- }
- for _, e := range df.enum {
- sl = append(sl, &ImportedDescriptor{common{file}, e})
- }
- for _, ext := range df.ext {
- sl = append(sl, &ImportedDescriptor{common{file}, ext})
- }
- }
- return
-}
-
-func extractComments(file *FileDescriptor) {
- file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
- for _, loc := range file.GetSourceCodeInfo().GetLocation() {
- if loc.LeadingComments == nil {
- continue
- }
- var p []string
- for _, n := range loc.Path {
- p = append(p, strconv.Itoa(int(n)))
- }
- file.comments[strings.Join(p, ",")] = loc
- }
-}
-
-// BuildTypeNameMap builds the map from fully qualified type names to objects.
-// The key names for the map come from the input data, which puts a period at the beginning.
-// It should be called after SetPackageNames and before GenerateAllFiles.
-func (g *Generator) BuildTypeNameMap() {
- g.typeNameToObject = make(map[string]Object)
- for _, f := range g.allFiles {
- // The names in this loop are defined by the proto world, not us, so the
- // package name may be empty. If so, the dotted package name of X will
- // be ".X"; otherwise it will be ".pkg.X".
- dottedPkg := "." + f.GetPackage()
- if dottedPkg != "." {
- dottedPkg += "."
- }
- for _, enum := range f.enum {
- name := dottedPkg + dottedSlice(enum.TypeName())
- g.typeNameToObject[name] = enum
- }
- for _, desc := range f.desc {
- name := dottedPkg + dottedSlice(desc.TypeName())
- g.typeNameToObject[name] = desc
- }
- }
-}
-
-// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
-// returns the descriptor for the message or enum with that name.
-func (g *Generator) ObjectNamed(typeName string) Object {
- o, ok := g.typeNameToObject[typeName]
- if !ok {
- g.Fail("can't find object with type", typeName)
- }
- return o
-}
-
-// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated.
-type AnnotatedAtoms struct {
- source string
- path string
- atoms []interface{}
-}
-
-// Annotate records the file name and proto AST path of a list of atoms
-// so that a later call to P can emit a link from each atom to its origin.
-func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms {
- return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms}
-}
-
-// printAtom prints the (atomic, non-annotation) argument to the generated output.
-func (g *Generator) printAtom(v interface{}) {
- switch v := v.(type) {
- case string:
- g.WriteString(v)
- case *string:
- g.WriteString(*v)
- case bool:
- fmt.Fprint(g, v)
- case *bool:
- fmt.Fprint(g, *v)
- case int:
- fmt.Fprint(g, v)
- case *int32:
- fmt.Fprint(g, *v)
- case *int64:
- fmt.Fprint(g, *v)
- case float64:
- fmt.Fprint(g, v)
- case *float64:
- fmt.Fprint(g, *v)
- case GoPackageName:
- g.WriteString(string(v))
- case GoImportPath:
- g.WriteString(strconv.Quote(string(v)))
- default:
- g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
- }
-}
-
-// P prints the arguments to the generated output. It handles strings and int32s, plus
-// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit
-// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode
-// is true).
-func (g *Generator) P(str ...interface{}) {
- if !g.writeOutput {
- return
- }
- g.WriteString(g.indent)
- for _, v := range str {
- switch v := v.(type) {
- case *AnnotatedAtoms:
- begin := int32(g.Len())
- for _, v := range v.atoms {
- g.printAtom(v)
- }
- if g.annotateCode {
- end := int32(g.Len())
- var path []int32
- for _, token := range strings.Split(v.path, ",") {
- val, err := strconv.ParseInt(token, 10, 32)
- if err != nil {
- g.Fail("could not parse proto AST path: ", err.Error())
- }
- path = append(path, int32(val))
- }
- g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{
- Path: path,
- SourceFile: &v.source,
- Begin: &begin,
- End: &end,
- })
- }
- default:
- g.printAtom(v)
- }
- }
- g.WriteByte('\n')
-}
-
-// addInitf stores the given statement to be printed inside the file's init function.
-// The statement is given as a format specifier and arguments.
-func (g *Generator) addInitf(stmt string, a ...interface{}) {
- g.init = append(g.init, fmt.Sprintf(stmt, a...))
-}
-
-// In Indents the output one tab stop.
-func (g *Generator) In() { g.indent += "\t" }
-
-// Out unindents the output one tab stop.
-func (g *Generator) Out() {
- if len(g.indent) > 0 {
- g.indent = g.indent[1:]
- }
-}
-
-// GenerateAllFiles generates the output for all the files we're outputting.
-func (g *Generator) GenerateAllFiles() {
- // Initialize the plugins
- for _, p := range plugins {
- p.Init(g)
- }
- // Generate the output. The generator runs for every file, even the files
- // that we don't generate output for, so that we can collate the full list
- // of exported symbols to support public imports.
- genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
- for _, file := range g.genFiles {
- genFileMap[file] = true
- }
- for _, file := range g.allFiles {
- g.Reset()
- g.annotations = nil
- g.writeOutput = genFileMap[file]
- g.generate(file)
- if !g.writeOutput {
- continue
- }
- fname := file.goFileName(g.pathType)
- g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
- Name: proto.String(fname),
- Content: proto.String(g.String()),
- })
- if g.annotateCode {
- // Store the generated code annotations in text, as the protoc plugin protocol requires that
- // strings contain valid UTF-8.
- g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
- Name: proto.String(file.goFileName(g.pathType) + ".meta"),
- Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
- })
- }
- }
-}
-
-// Run all the plugins associated with the file.
-func (g *Generator) runPlugins(file *FileDescriptor) {
- for _, p := range plugins {
- p.Generate(file)
- }
-}
-
-// Fill the response protocol buffer with the generated output for all the files we're
-// supposed to generate.
-func (g *Generator) generate(file *FileDescriptor) {
- g.file = file
- g.usedPackages = make(map[GoImportPath]bool)
- g.packageNames = make(map[GoImportPath]GoPackageName)
- g.usedPackageNames = make(map[GoPackageName]bool)
- g.addedImports = make(map[GoImportPath]bool)
- for name := range globalPackageNames {
- g.usedPackageNames[name] = true
- }
-
- g.P("// This is a compile-time assertion to ensure that this generated file")
- g.P("// is compatible with the proto package it is being compiled against.")
- g.P("// A compilation error at this line likely means your copy of the")
- g.P("// proto package needs to be updated.")
- g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
- g.P()
-
- for _, td := range g.file.imp {
- g.generateImported(td)
- }
- for _, enum := range g.file.enum {
- g.generateEnum(enum)
- }
- for _, desc := range g.file.desc {
- // Don't generate virtual messages for maps.
- if desc.GetOptions().GetMapEntry() {
- continue
- }
- g.generateMessage(desc)
- }
- for _, ext := range g.file.ext {
- g.generateExtension(ext)
- }
- g.generateInitFunction()
- g.generateFileDescriptor(file)
-
- // Run the plugins before the imports so we know which imports are necessary.
- g.runPlugins(file)
-
- // Generate header and imports last, though they appear first in the output.
- rem := g.Buffer
- remAnno := g.annotations
- g.Buffer = new(bytes.Buffer)
- g.annotations = nil
- g.generateHeader()
- g.generateImports()
- if !g.writeOutput {
- return
- }
- // Adjust the offsets for annotations displaced by the header and imports.
- for _, anno := range remAnno {
- *anno.Begin += int32(g.Len())
- *anno.End += int32(g.Len())
- g.annotations = append(g.annotations, anno)
- }
- g.Write(rem.Bytes())
-
- // Reformat generated code and patch annotation locations.
- fset := token.NewFileSet()
- original := g.Bytes()
- if g.annotateCode {
- // make a copy independent of g; we'll need it after Reset.
- original = append([]byte(nil), original...)
- }
- fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments)
- if err != nil {
- // Print out the bad code with line numbers.
- // This should never happen in practice, but it can while changing generated code,
- // so consider this a debugging aid.
- var src bytes.Buffer
- s := bufio.NewScanner(bytes.NewReader(original))
- for line := 1; s.Scan(); line++ {
- fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
- }
- g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
- }
- ast.SortImports(fset, fileAST)
- g.Reset()
- err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST)
- if err != nil {
- g.Fail("generated Go source code could not be reformatted:", err.Error())
- }
- if g.annotateCode {
- m, err := remap.Compute(original, g.Bytes())
- if err != nil {
- g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error())
- }
- for _, anno := range g.annotations {
- new, ok := m.Find(int(*anno.Begin), int(*anno.End))
- if !ok {
- g.Fail("span in formatted generated Go source code could not be mapped back to the original code")
- }
- *anno.Begin = int32(new.Pos)
- *anno.End = int32(new.End)
- }
- }
-}
-
-// Generate the header, including package definition
-func (g *Generator) generateHeader() {
- g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
- if g.file.GetOptions().GetDeprecated() {
- g.P("// ", g.file.Name, " is a deprecated file.")
- } else {
- g.P("// source: ", g.file.Name)
- }
- g.P()
- g.PrintComments(strconv.Itoa(packagePath))
- g.P()
- g.P("package ", g.file.packageName)
- g.P()
-}
-
-// deprecationComment is the standard comment added to deprecated
-// messages, fields, enums, and enum values.
-var deprecationComment = "// Deprecated: Do not use."
-
-// PrintComments prints any comments from the source .proto file.
-// The path is a comma-separated list of integers.
-// It returns an indication of whether any comments were printed.
-// See descriptor.proto for its format.
-func (g *Generator) PrintComments(path string) bool {
- if !g.writeOutput {
- return false
- }
- if c, ok := g.makeComments(path); ok {
- g.P(c)
- return true
- }
- return false
-}
-
-// makeComments generates the comment string for the field, no "\n" at the end
-func (g *Generator) makeComments(path string) (string, bool) {
- loc, ok := g.file.comments[path]
- if !ok {
- return "", false
- }
- w := new(bytes.Buffer)
- nl := ""
- for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") {
- fmt.Fprintf(w, "%s//%s", nl, line)
- nl = "\n"
- }
- return w.String(), true
-}
-
-func (g *Generator) fileByName(filename string) *FileDescriptor {
- return g.allFilesByName[filename]
-}
-
-// weak returns whether the ith import of the current file is a weak import.
-func (g *Generator) weak(i int32) bool {
- for _, j := range g.file.WeakDependency {
- if j == i {
- return true
- }
- }
- return false
-}
-
-// Generate the imports
-func (g *Generator) generateImports() {
- imports := make(map[GoImportPath]GoPackageName)
- for i, s := range g.file.Dependency {
- fd := g.fileByName(s)
- importPath := fd.importPath
- // Do not import our own package.
- if importPath == g.file.importPath {
- continue
- }
- // Do not import weak imports.
- if g.weak(int32(i)) {
- continue
- }
- // Do not import a package twice.
- if _, ok := imports[importPath]; ok {
- continue
- }
- // We need to import all the dependencies, even if we don't reference them,
- // because other code and tools depend on having the full transitive closure
- // of protocol buffer types in the binary.
- packageName := g.GoPackageName(importPath)
- if _, ok := g.usedPackages[importPath]; !ok {
- packageName = "_"
- }
- imports[importPath] = packageName
- }
- for importPath := range g.addedImports {
- imports[importPath] = g.GoPackageName(importPath)
- }
- // We almost always need a proto import. Rather than computing when we
- // do, which is tricky when there's a plugin, just import it and
- // reference it later. The same argument applies to the fmt and math packages.
- g.P("import (")
- g.P(g.Pkg["fmt"] + ` "fmt"`)
- g.P(g.Pkg["math"] + ` "math"`)
- g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto")
- for importPath, packageName := range imports {
- g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath)
- }
- g.P(")")
- g.P()
- // TODO: may need to worry about uniqueness across plugins
- for _, p := range plugins {
- p.GenerateImports(g.file)
- g.P()
- }
- g.P("// Reference imports to suppress errors if they are not otherwise used.")
- g.P("var _ = ", g.Pkg["proto"], ".Marshal")
- g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
- g.P("var _ = ", g.Pkg["math"], ".Inf")
- g.P()
-}
-
-func (g *Generator) generateImported(id *ImportedDescriptor) {
- df := id.o.File()
- filename := *df.Name
- if df.importPath == g.file.importPath {
- // Don't generate type aliases for files in the same Go package as this one.
- return
- }
- if !supportTypeAliases {
- g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename))
- }
- g.usedPackages[df.importPath] = true
-
- for _, sym := range df.exported[id.o] {
- sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath))
- }
-
- g.P()
-}
-
-// Generate the enum definitions for this EnumDescriptor.
-func (g *Generator) generateEnum(enum *EnumDescriptor) {
- // The full type name
- typeName := enum.TypeName()
- // The full type name, CamelCased.
- ccTypeName := CamelCaseSlice(typeName)
- ccPrefix := enum.prefix()
-
- deprecatedEnum := ""
- if enum.GetOptions().GetDeprecated() {
- deprecatedEnum = deprecationComment
- }
- g.PrintComments(enum.path)
- g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum)
- g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
- g.P("const (")
- for i, e := range enum.Value {
- etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)
- g.PrintComments(etorPath)
-
- deprecatedValue := ""
- if e.GetOptions().GetDeprecated() {
- deprecatedValue = deprecationComment
- }
-
- name := ccPrefix + *e.Name
- g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue)
- g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
- }
- g.P(")")
- g.P()
- g.P("var ", ccTypeName, "_name = map[int32]string{")
- generated := make(map[int32]bool) // avoid duplicate values
- for _, e := range enum.Value {
- duplicate := ""
- if _, present := generated[*e.Number]; present {
- duplicate = "// Duplicate value: "
- }
- g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
- generated[*e.Number] = true
- }
- g.P("}")
- g.P()
- g.P("var ", ccTypeName, "_value = map[string]int32{")
- for _, e := range enum.Value {
- g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
- }
- g.P("}")
- g.P()
-
- if !enum.proto3() {
- g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
- g.P("p := new(", ccTypeName, ")")
- g.P("*p = x")
- g.P("return p")
- g.P("}")
- g.P()
- }
-
- g.P("func (x ", ccTypeName, ") String() string {")
- g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
- g.P("}")
- g.P()
-
- if !enum.proto3() {
- g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
- g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
- g.P("if err != nil {")
- g.P("return err")
- g.P("}")
- g.P("*x = ", ccTypeName, "(value)")
- g.P("return nil")
- g.P("}")
- g.P()
- }
-
- var indexes []string
- for m := enum.parent; m != nil; m = m.parent {
- // XXX: skip groups?
- indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
- }
- indexes = append(indexes, strconv.Itoa(enum.index))
- g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {")
- g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
- g.P("}")
- g.P()
- if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
- g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
- g.P()
- }
-
- g.generateEnumRegistration(enum)
-}
-
-// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
-// identifies details of the field for the protocol buffer marshaling and unmarshaling
-// code. The fields are:
-// wire encoding
-// protocol tag number
-// opt,req,rep for optional, required, or repeated
-// packed whether the encoding is "packed" (optional; repeated primitives only)
-// name= the original declared name
-// enum= the name of the enum type if it is an enum-typed field.
-// proto3 if this field is in a proto3 message
-// def= string representation of the default value, if any.
-// The default value must be in a representation that can be used at run-time
-// to generate the default value. Thus bools become 0 and 1, for instance.
-func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
- optrepreq := ""
- switch {
- case isOptional(field):
- optrepreq = "opt"
- case isRequired(field):
- optrepreq = "req"
- case isRepeated(field):
- optrepreq = "rep"
- }
- var defaultValue string
- if dv := field.DefaultValue; dv != nil { // set means an explicit default
- defaultValue = *dv
- // Some types need tweaking.
- switch *field.Type {
- case descriptor.FieldDescriptorProto_TYPE_BOOL:
- if defaultValue == "true" {
- defaultValue = "1"
- } else {
- defaultValue = "0"
- }
- case descriptor.FieldDescriptorProto_TYPE_STRING,
- descriptor.FieldDescriptorProto_TYPE_BYTES:
- // Nothing to do. Quoting is done for the whole tag.
- case descriptor.FieldDescriptorProto_TYPE_ENUM:
- // For enums we need to provide the integer constant.
- obj := g.ObjectNamed(field.GetTypeName())
- if id, ok := obj.(*ImportedDescriptor); ok {
- // It is an enum that was publicly imported.
- // We need the underlying type.
- obj = id.o
- }
- enum, ok := obj.(*EnumDescriptor)
- if !ok {
- log.Printf("obj is a %T", obj)
- if id, ok := obj.(*ImportedDescriptor); ok {
- log.Printf("id.o is a %T", id.o)
- }
- g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
- }
- defaultValue = enum.integerValueAsString(defaultValue)
- case descriptor.FieldDescriptorProto_TYPE_FLOAT:
- if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
- if f, err := strconv.ParseFloat(defaultValue, 32); err == nil {
- defaultValue = fmt.Sprint(float32(f))
- }
- }
- case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
- if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
- if f, err := strconv.ParseFloat(defaultValue, 64); err == nil {
- defaultValue = fmt.Sprint(f)
- }
- }
- }
- defaultValue = ",def=" + defaultValue
- }
- enum := ""
- if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
- // We avoid using obj.GoPackageName(), because we want to use the
- // original (proto-world) package name.
- obj := g.ObjectNamed(field.GetTypeName())
- if id, ok := obj.(*ImportedDescriptor); ok {
- obj = id.o
- }
- enum = ",enum="
- if pkg := obj.File().GetPackage(); pkg != "" {
- enum += pkg + "."
- }
- enum += CamelCaseSlice(obj.TypeName())
- }
- packed := ""
- if (field.Options != nil && field.Options.GetPacked()) ||
- // Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
- // "In proto3, repeated fields of scalar numeric types use packed encoding by default."
- (message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
- isRepeated(field) && isScalar(field)) {
- packed = ",packed"
- }
- fieldName := field.GetName()
- name := fieldName
- if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
- // We must use the type name for groups instead of
- // the field name to preserve capitalization.
- // type_name in FieldDescriptorProto is fully-qualified,
- // but we only want the local part.
- name = *field.TypeName
- if i := strings.LastIndex(name, "."); i >= 0 {
- name = name[i+1:]
- }
- }
- if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name {
- // TODO: escaping might be needed, in which case
- // perhaps this should be in its own "json" tag.
- name += ",json=" + json
- }
- name = ",name=" + name
- if message.proto3() {
- name += ",proto3"
- }
- oneof := ""
- if field.OneofIndex != nil {
- oneof = ",oneof"
- }
- return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s",
- wiretype,
- field.GetNumber(),
- optrepreq,
- packed,
- name,
- enum,
- oneof,
- defaultValue))
-}
-
-func needsStar(typ descriptor.FieldDescriptorProto_Type) bool {
- switch typ {
- case descriptor.FieldDescriptorProto_TYPE_GROUP:
- return false
- case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
- return false
- case descriptor.FieldDescriptorProto_TYPE_BYTES:
- return false
- }
- return true
-}
-
-// TypeName is the printed name appropriate for an item. If the object is in the current file,
-// TypeName drops the package name and underscores the rest.
-// Otherwise the object is from another package; and the result is the underscored
-// package name followed by the item name.
-// The result always has an initial capital.
-func (g *Generator) TypeName(obj Object) string {
- return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
-}
-
-// GoType returns a string representing the type name, and the wire type
-func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
- // TODO: Options.
- switch *field.Type {
- case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
- typ, wire = "float64", "fixed64"
- case descriptor.FieldDescriptorProto_TYPE_FLOAT:
- typ, wire = "float32", "fixed32"
- case descriptor.FieldDescriptorProto_TYPE_INT64:
- typ, wire = "int64", "varint"
- case descriptor.FieldDescriptorProto_TYPE_UINT64:
- typ, wire = "uint64", "varint"
- case descriptor.FieldDescriptorProto_TYPE_INT32:
- typ, wire = "int32", "varint"
- case descriptor.FieldDescriptorProto_TYPE_UINT32:
- typ, wire = "uint32", "varint"
- case descriptor.FieldDescriptorProto_TYPE_FIXED64:
- typ, wire = "uint64", "fixed64"
- case descriptor.FieldDescriptorProto_TYPE_FIXED32:
- typ, wire = "uint32", "fixed32"
- case descriptor.FieldDescriptorProto_TYPE_BOOL:
- typ, wire = "bool", "varint"
- case descriptor.FieldDescriptorProto_TYPE_STRING:
- typ, wire = "string", "bytes"
- case descriptor.FieldDescriptorProto_TYPE_GROUP:
- desc := g.ObjectNamed(field.GetTypeName())
- typ, wire = "*"+g.TypeName(desc), "group"
- case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
- desc := g.ObjectNamed(field.GetTypeName())
- typ, wire = "*"+g.TypeName(desc), "bytes"
- case descriptor.FieldDescriptorProto_TYPE_BYTES:
- typ, wire = "[]byte", "bytes"
- case descriptor.FieldDescriptorProto_TYPE_ENUM:
- desc := g.ObjectNamed(field.GetTypeName())
- typ, wire = g.TypeName(desc), "varint"
- case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
- typ, wire = "int32", "fixed32"
- case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
- typ, wire = "int64", "fixed64"
- case descriptor.FieldDescriptorProto_TYPE_SINT32:
- typ, wire = "int32", "zigzag32"
- case descriptor.FieldDescriptorProto_TYPE_SINT64:
- typ, wire = "int64", "zigzag64"
- default:
- g.Fail("unknown type for", field.GetName())
- }
- if isRepeated(field) {
- typ = "[]" + typ
- } else if message != nil && message.proto3() {
- return
- } else if field.OneofIndex != nil && message != nil {
- return
- } else if needsStar(*field.Type) {
- typ = "*" + typ
- }
- return
-}
-
-func (g *Generator) RecordTypeUse(t string) {
- if _, ok := g.typeNameToObject[t]; !ok {
- return
- }
- importPath := g.ObjectNamed(t).GoImportPath()
- if importPath == g.outputImportPath {
- // Don't record use of objects in our package.
- return
- }
- g.AddImport(importPath)
- g.usedPackages[importPath] = true
-}
-
-// Method names that may be generated. Fields with these names get an
-// underscore appended. Any change to this set is a potential incompatible
-// API change because it changes generated field names.
-var methodNames = [...]string{
- "Reset",
- "String",
- "ProtoMessage",
- "Marshal",
- "Unmarshal",
- "ExtensionRangeArray",
- "ExtensionMap",
- "Descriptor",
-}
-
-// Names of messages in the `google.protobuf` package for which
-// we will generate XXX_WellKnownType methods.
-var wellKnownTypes = map[string]bool{
- "Any": true,
- "Duration": true,
- "Empty": true,
- "Struct": true,
- "Timestamp": true,
-
- "Value": true,
- "ListValue": true,
- "DoubleValue": true,
- "FloatValue": true,
- "Int64Value": true,
- "UInt64Value": true,
- "Int32Value": true,
- "UInt32Value": true,
- "BoolValue": true,
- "StringValue": true,
- "BytesValue": true,
-}
-
-// getterDefault finds the default value for the field to return from a getter,
-// regardless of if it's a built in default or explicit from the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName"
-func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
- if isRepeated(field) {
- return "nil"
- }
- if def := field.GetDefaultValue(); def != "" {
- defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
- if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
- return defaultConstant
- }
- return "append([]byte(nil), " + defaultConstant + "...)"
- }
- switch *field.Type {
- case descriptor.FieldDescriptorProto_TYPE_BOOL:
- return "false"
- case descriptor.FieldDescriptorProto_TYPE_STRING:
- return `""`
- case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
- return "nil"
- case descriptor.FieldDescriptorProto_TYPE_ENUM:
- obj := g.ObjectNamed(field.GetTypeName())
- var enum *EnumDescriptor
- if id, ok := obj.(*ImportedDescriptor); ok {
- // The enum type has been publicly imported.
- enum, _ = id.o.(*EnumDescriptor)
- } else {
- enum, _ = obj.(*EnumDescriptor)
- }
- if enum == nil {
- log.Printf("don't know how to generate getter for %s", field.GetName())
- return "nil"
- }
- if len(enum.Value) == 0 {
- return "0 // empty enum"
- }
- first := enum.Value[0].GetName()
- return g.DefaultPackageName(obj) + enum.prefix() + first
- default:
- return "0"
- }
-}
-
-// defaultConstantName builds the name of the default constant from the message
-// type name and the untouched field name, e.g. "Default_MessageType_FieldName"
-func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
- return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
-}
-
-// The different types of fields in a message and how to actually print them
-// Most of the logic for generateMessage is in the methods of these types.
-//
-// Note that the content of the field is irrelevant, a simpleField can contain
-// anything from a scalar to a group (which is just a message).
-//
-// Extension fields (and message sets) are however handled separately.
-//
-// simpleField - a field that is neiter weak nor oneof, possibly repeated
-// oneofField - field containing list of subfields:
-// - oneofSubField - a field within the oneof
-
-// msgCtx contains the context for the generator functions.
-type msgCtx struct {
- goName string // Go struct name of the message, e.g. MessageName
- message *Descriptor // The descriptor for the message
-}
-
-// fieldCommon contains data common to all types of fields.
-type fieldCommon struct {
- goName string // Go name of field, e.g. "FieldName" or "Descriptor_"
- protoName string // Name of field in proto language, e.g. "field_name" or "descriptor"
- getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
- goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
- tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
- fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
-}
-
-// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
-func (f *fieldCommon) getProtoName() string {
- return f.protoName
-}
-
-// getGoType returns the go type of the field as a string, e.g. "*int32".
-func (f *fieldCommon) getGoType() string {
- return f.goType
-}
-
-// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
-type simpleField struct {
- fieldCommon
- protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
- protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
- deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
- getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
- protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5"
- comment string // The full comment for the field, e.g. "// Useful information"
-}
-
-// decl prints the declaration of the field in the struct (if any).
-func (f *simpleField) decl(g *Generator, mc *msgCtx) {
- g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
-}
-
-// getter prints the getter for the field.
-func (f *simpleField) getter(g *Generator, mc *msgCtx) {
- star := ""
- tname := f.goType
- if needsStar(f.protoType) && tname[0] == '*' {
- tname = tname[1:]
- star = "*"
- }
- if f.deprecated != "" {
- g.P(f.deprecated)
- }
- g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
- if f.getterDef == "nil" { // Simpler getter
- g.P("if m != nil {")
- g.P("return m." + f.goName)
- g.P("}")
- g.P("return nil")
- g.P("}")
- g.P()
- return
- }
- if mc.message.proto3() {
- g.P("if m != nil {")
- } else {
- g.P("if m != nil && m." + f.goName + " != nil {")
- }
- g.P("return " + star + "m." + f.goName)
- g.P("}")
- g.P("return ", f.getterDef)
- g.P("}")
- g.P()
-}
-
-// setter prints the setter method of the field.
-func (f *simpleField) setter(g *Generator, mc *msgCtx) {
- // No setter for regular fields yet
-}
-
-// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
-func (f *simpleField) getProtoDef() string {
- return f.protoDef
-}
-
-// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
-func (f *simpleField) getProtoTypeName() string {
- return f.protoTypeName
-}
-
-// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
-func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
- return f.protoType
-}
-
-// oneofSubFields are kept slize held by each oneofField. They do not appear in the top level slize of fields for the message.
-type oneofSubField struct {
- fieldCommon
- protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
- protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
- oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName"
- fieldNumber int // Actual field number, as defined in proto, e.g. 12
- getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
- protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5"
- deprecated string // Deprecation comment, if any.
-}
-
-// typedNil prints a nil casted to the pointer to this field.
-// - for XXX_OneofWrappers
-func (f *oneofSubField) typedNil(g *Generator) {
- g.P("(*", f.oneofTypeName, ")(nil),")
-}
-
-// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
-func (f *oneofSubField) getProtoDef() string {
- return f.protoDef
-}
-
-// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
-func (f *oneofSubField) getProtoTypeName() string {
- return f.protoTypeName
-}
-
-// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
-func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
- return f.protoType
-}
-
-// oneofField represents the oneof on top level.
-// The alternative fields within the oneof are represented by oneofSubField.
-type oneofField struct {
- fieldCommon
- subFields []*oneofSubField // All the possible oneof fields
- comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
-}
-
-// decl prints the declaration of the field in the struct (if any).
-func (f *oneofField) decl(g *Generator, mc *msgCtx) {
- comment := f.comment
- for _, sf := range f.subFields {
- comment += "//\t*" + sf.oneofTypeName + "\n"
- }
- g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
-}
-
-// getter for a oneof field will print additional discriminators and interfaces for the oneof,
-// also it prints all the getters for the sub fields.
-func (f *oneofField) getter(g *Generator, mc *msgCtx) {
- // The discriminator type
- g.P("type ", f.goType, " interface {")
- g.P(f.goType, "()")
- g.P("}")
- g.P()
- // The subField types, fulfilling the discriminator type contract
- for _, sf := range f.subFields {
- g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
- g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
- g.P("}")
- g.P()
- }
- for _, sf := range f.subFields {
- g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
- g.P()
- }
- // Getter for the oneof field
- g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
- g.P("if m != nil { return m.", f.goName, " }")
- g.P("return nil")
- g.P("}")
- g.P()
- // Getters for each oneof
- for _, sf := range f.subFields {
- if sf.deprecated != "" {
- g.P(sf.deprecated)
- }
- g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
- g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
- g.P("return x.", sf.goName)
- g.P("}")
- g.P("return ", sf.getterDef)
- g.P("}")
- g.P()
- }
-}
-
-// setter prints the setter method of the field.
-func (f *oneofField) setter(g *Generator, mc *msgCtx) {
- // No setters for oneof yet
-}
-
-// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
-type topLevelField interface {
- decl(g *Generator, mc *msgCtx) // print declaration within the struct
- getter(g *Generator, mc *msgCtx) // print getter
- setter(g *Generator, mc *msgCtx) // print setter if applicable
-}
-
-// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
-type defField interface {
- getProtoDef() string // default value explicitly stated in the proto file, e.g "yoshi" or "5"
- getProtoName() string // proto name of a field, e.g. "field_name" or "descriptor"
- getGoType() string // go type of the field as a string, e.g. "*int32"
- getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration"
- getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
-}
-
-// generateDefaultConstants adds constants for default values if needed, which is only if the default value is.
-// explicit in the proto.
-func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
- // Collect fields that can have defaults
- dFields := []defField{}
- for _, pf := range topLevelFields {
- if f, ok := pf.(*oneofField); ok {
- for _, osf := range f.subFields {
- dFields = append(dFields, osf)
- }
- continue
- }
- dFields = append(dFields, pf.(defField))
- }
- for _, df := range dFields {
- def := df.getProtoDef()
- if def == "" {
- continue
- }
- fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
- typename := df.getGoType()
- if typename[0] == '*' {
- typename = typename[1:]
- }
- kind := "const "
- switch {
- case typename == "bool":
- case typename == "string":
- def = strconv.Quote(def)
- case typename == "[]byte":
- def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
- kind = "var "
- case def == "inf", def == "-inf", def == "nan":
- // These names are known to, and defined by, the protocol language.
- switch def {
- case "inf":
- def = "math.Inf(1)"
- case "-inf":
- def = "math.Inf(-1)"
- case "nan":
- def = "math.NaN()"
- }
- if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
- def = "float32(" + def + ")"
- }
- kind = "var "
- case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT:
- if f, err := strconv.ParseFloat(def, 32); err == nil {
- def = fmt.Sprint(float32(f))
- }
- case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE:
- if f, err := strconv.ParseFloat(def, 64); err == nil {
- def = fmt.Sprint(f)
- }
- case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
- // Must be an enum. Need to construct the prefixed name.
- obj := g.ObjectNamed(df.getProtoTypeName())
- var enum *EnumDescriptor
- if id, ok := obj.(*ImportedDescriptor); ok {
- // The enum type has been publicly imported.
- enum, _ = id.o.(*EnumDescriptor)
- } else {
- enum, _ = obj.(*EnumDescriptor)
- }
- if enum == nil {
- log.Printf("don't know how to generate constant for %s", fieldname)
- continue
- }
- def = g.DefaultPackageName(obj) + enum.prefix() + def
- }
- g.P(kind, fieldname, " ", typename, " = ", def)
- g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
- }
- g.P()
-}
-
-// generateInternalStructFields just adds the XXX_ fields to the message struct.
-func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
- g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
- if len(mc.message.ExtensionRange) > 0 {
- messageset := ""
- if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
- messageset = "protobuf_messageset:\"1\" "
- }
- g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
- }
- g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
- g.P("XXX_sizecache\tint32 `json:\"-\"`")
-
-}
-
-// generateOneofFuncs adds all the utility functions for oneof, including marshalling, unmarshalling and sizer.
-func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
- ofields := []*oneofField{}
- for _, f := range topLevelFields {
- if o, ok := f.(*oneofField); ok {
- ofields = append(ofields, o)
- }
- }
- if len(ofields) == 0 {
- return
- }
-
- // OneofFuncs
- g.P("// XXX_OneofWrappers is for the internal use of the proto package.")
- g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {")
- g.P("return []interface{}{")
- for _, of := range ofields {
- for _, sf := range of.subFields {
- sf.typedNil(g)
- }
- }
- g.P("}")
- g.P("}")
- g.P()
-}
-
-// generateMessageStruct adds the actual struct with it's members (but not methods) to the output.
-func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
- comments := g.PrintComments(mc.message.path)
-
- // Guarantee deprecation comments appear after user-provided comments.
- if mc.message.GetOptions().GetDeprecated() {
- if comments {
- // Convention: Separate deprecation comments from original
- // comments with an empty line.
- g.P("//")
- }
- g.P(deprecationComment)
- }
-
- g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
- for _, pf := range topLevelFields {
- pf.decl(g, mc)
- }
- g.generateInternalStructFields(mc, topLevelFields)
- g.P("}")
-}
-
-// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
-func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
- for _, pf := range topLevelFields {
- pf.getter(g, mc)
- }
-}
-
-// generateSetters add setters for all fields, including oneofs and weak fields when applicable.
-func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
- for _, pf := range topLevelFields {
- pf.setter(g, mc)
- }
-}
-
-// generateCommonMethods adds methods to the message that are not on a per field basis.
-func (g *Generator) generateCommonMethods(mc *msgCtx) {
- // Reset, String and ProtoMessage methods.
- g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }")
- g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
- g.P("func (*", mc.goName, ") ProtoMessage() {}")
- var indexes []string
- for m := mc.message; m != nil; m = m.parent {
- indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
- }
- g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {")
- g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
- g.P("}")
- g.P()
- // TODO: Revisit the decision to use a XXX_WellKnownType method
- // if we change proto.MessageName to work with multiple equivalents.
- if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] {
- g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`)
- g.P()
- }
-
- // Extension support methods
- if len(mc.message.ExtensionRange) > 0 {
- g.P()
- g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{")
- for _, r := range mc.message.ExtensionRange {
- end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
- g.P("{Start: ", r.Start, ", End: ", end, "},")
- }
- g.P("}")
- g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
- g.P("return extRange_", mc.goName)
- g.P("}")
- g.P()
- }
-
- // TODO: It does not scale to keep adding another method for every
- // operation on protos that we want to switch over to using the
- // table-driven approach. Instead, we should only add a single method
- // that allows getting access to the *InternalMessageInfo struct and then
- // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that.
-
- // Wrapper for table-driven marshaling and unmarshaling.
- g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {")
- g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)")
- g.P("}")
-
- g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {")
- g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)")
- g.P("}")
-
- g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {")
- g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)")
- g.P("}")
-
- g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message
- g.P("return xxx_messageInfo_", mc.goName, ".Size(m)")
- g.P("}")
-
- g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {")
- g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)")
- g.P("}")
-
- g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo")
- g.P()
-}
-
-// Generate the type, methods and default constant definitions for this Descriptor.
-func (g *Generator) generateMessage(message *Descriptor) {
- topLevelFields := []topLevelField{}
- oFields := make(map[int32]*oneofField)
- // The full type name
- typeName := message.TypeName()
- // The full type name, CamelCased.
- goTypeName := CamelCaseSlice(typeName)
-
- usedNames := make(map[string]bool)
- for _, n := range methodNames {
- usedNames[n] = true
- }
-
- // allocNames finds a conflict-free variation of the given strings,
- // consistently mutating their suffixes.
- // It returns the same number of strings.
- allocNames := func(ns ...string) []string {
- Loop:
- for {
- for _, n := range ns {
- if usedNames[n] {
- for i := range ns {
- ns[i] += "_"
- }
- continue Loop
- }
- }
- for _, n := range ns {
- usedNames[n] = true
- }
- return ns
- }
- }
-
- mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later
-
- // Build a structure more suitable for generating the text in one pass
- for i, field := range message.Field {
- // Allocate the getter and the field at the same time so name
- // collisions create field/method consistent names.
- // TODO: This allocation occurs based on the order of the fields
- // in the proto file, meaning that a change in the field
- // ordering can change generated Method/Field names.
- base := CamelCase(*field.Name)
- ns := allocNames(base, "Get"+base)
- fieldName, fieldGetterName := ns[0], ns[1]
- typename, wiretype := g.GoType(message, field)
- jsonName := *field.Name
- tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
-
- oneof := field.OneofIndex != nil
- if oneof && oFields[*field.OneofIndex] == nil {
- odp := message.OneofDecl[int(*field.OneofIndex)]
- base := CamelCase(odp.GetName())
- fname := allocNames(base)[0]
-
- // This is the first field of a oneof we haven't seen before.
- // Generate the union field.
- oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)
- c, ok := g.makeComments(oneofFullPath)
- if ok {
- c += "\n//\n"
- }
- c += "// Types that are valid to be assigned to " + fname + ":\n"
- // Generate the rest of this comment later,
- // when we've computed any disambiguation.
-
- dname := "is" + goTypeName + "_" + fname
- tag := `protobuf_oneof:"` + odp.GetName() + `"`
- of := oneofField{
- fieldCommon: fieldCommon{
- goName: fname,
- getterName: "Get"+fname,
- goType: dname,
- tags: tag,
- protoName: odp.GetName(),
- fullPath: oneofFullPath,
- },
- comment: c,
- }
- topLevelFields = append(topLevelFields, &of)
- oFields[*field.OneofIndex] = &of
- }
-
- if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
- desc := g.ObjectNamed(field.GetTypeName())
- if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
- // Figure out the Go types and tags for the key and value types.
- keyField, valField := d.Field[0], d.Field[1]
- keyType, keyWire := g.GoType(d, keyField)
- valType, valWire := g.GoType(d, valField)
- keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
-
- // We don't use stars, except for message-typed values.
- // Message and enum types are the only two possibly foreign types used in maps,
- // so record their use. They are not permitted as map keys.
- keyType = strings.TrimPrefix(keyType, "*")
- switch *valField.Type {
- case descriptor.FieldDescriptorProto_TYPE_ENUM:
- valType = strings.TrimPrefix(valType, "*")
- g.RecordTypeUse(valField.GetTypeName())
- case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
- g.RecordTypeUse(valField.GetTypeName())
- default:
- valType = strings.TrimPrefix(valType, "*")
- }
-
- typename = fmt.Sprintf("map[%s]%s", keyType, valType)
- mapFieldTypes[field] = typename // record for the getter generation
-
- tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
- }
- }
-
- fieldDeprecated := ""
- if field.GetOptions().GetDeprecated() {
- fieldDeprecated = deprecationComment
- }
-
- dvalue := g.getterDefault(field, goTypeName)
- if oneof {
- tname := goTypeName + "_" + fieldName
- // It is possible for this to collide with a message or enum
- // nested in this message. Check for collisions.
- for {
- ok := true
- for _, desc := range message.nested {
- if CamelCaseSlice(desc.TypeName()) == tname {
- ok = false
- break
- }
- }
- for _, enum := range message.enums {
- if CamelCaseSlice(enum.TypeName()) == tname {
- ok = false
- break
- }
- }
- if !ok {
- tname += "_"
- continue
- }
- break
- }
-
- oneofField := oFields[*field.OneofIndex]
- tag := "protobuf:" + g.goTag(message, field, wiretype)
- sf := oneofSubField{
- fieldCommon: fieldCommon{
- goName: fieldName,
- getterName: fieldGetterName,
- goType: typename,
- tags: tag,
- protoName: field.GetName(),
- fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i),
- },
- protoTypeName: field.GetTypeName(),
- fieldNumber: int(*field.Number),
- protoType: *field.Type,
- getterDef: dvalue,
- protoDef: field.GetDefaultValue(),
- oneofTypeName: tname,
- deprecated: fieldDeprecated,
- }
- oneofField.subFields = append(oneofField.subFields, &sf)
- g.RecordTypeUse(field.GetTypeName())
- continue
- }
-
- fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)
- c, ok := g.makeComments(fieldFullPath)
- if ok {
- c += "\n"
- }
- rf := simpleField{
- fieldCommon: fieldCommon{
- goName: fieldName,
- getterName: fieldGetterName,
- goType: typename,
- tags: tag,
- protoName: field.GetName(),
- fullPath: fieldFullPath,
- },
- protoTypeName: field.GetTypeName(),
- protoType: *field.Type,
- deprecated: fieldDeprecated,
- getterDef: dvalue,
- protoDef: field.GetDefaultValue(),
- comment: c,
- }
- var pf topLevelField = &rf
-
- topLevelFields = append(topLevelFields, pf)
- g.RecordTypeUse(field.GetTypeName())
- }
-
- mc := &msgCtx{
- goName: goTypeName,
- message: message,
- }
-
- g.generateMessageStruct(mc, topLevelFields)
- g.P()
- g.generateCommonMethods(mc)
- g.P()
- g.generateDefaultConstants(mc, topLevelFields)
- g.P()
- g.generateGetters(mc, topLevelFields)
- g.P()
- g.generateSetters(mc, topLevelFields)
- g.P()
- g.generateOneofFuncs(mc, topLevelFields)
- g.P()
-
- var oneofTypes []string
- for _, f := range topLevelFields {
- if of, ok := f.(*oneofField); ok {
- for _, osf := range of.subFields {
- oneofTypes = append(oneofTypes, osf.oneofTypeName)
- }
- }
- }
-
- opts := message.Options
- ms := &messageSymbol{
- sym: goTypeName,
- hasExtensions: len(message.ExtensionRange) > 0,
- isMessageSet: opts != nil && opts.GetMessageSetWireFormat(),
- oneofTypes: oneofTypes,
- }
- g.file.addExport(message, ms)
-
- for _, ext := range message.ext {
- g.generateExtension(ext)
- }
-
- fullName := strings.Join(message.TypeName(), ".")
- if g.file.Package != nil {
- fullName = *g.file.Package + "." + fullName
- }
-
- g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
- // Register types for native map types.
- for _, k := range mapFieldKeys(mapFieldTypes) {
- fullName := strings.TrimPrefix(*k.TypeName, ".")
- g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
- }
-
-}
-
-type byTypeName []*descriptor.FieldDescriptorProto
-
-func (a byTypeName) Len() int { return len(a) }
-func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
-
-// mapFieldKeys returns the keys of m in a consistent order.
-func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
- keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
- for k := range m {
- keys = append(keys, k)
- }
- sort.Sort(byTypeName(keys))
- return keys
-}
-
-var escapeChars = [256]byte{
- 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
-}
-
-// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
-// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
-// sequences are conveyed, unmodified, into the decoded result.
-func unescape(s string) string {
- // NB: Sadly, we can't use strconv.Unquote because protoc will escape both
- // single and double quotes, but strconv.Unquote only allows one or the
- // other (based on actual surrounding quotes of its input argument).
-
- var out []byte
- for len(s) > 0 {
- // regular character, or too short to be valid escape
- if s[0] != '\\' || len(s) < 2 {
- out = append(out, s[0])
- s = s[1:]
- } else if c := escapeChars[s[1]]; c != 0 {
- // escape sequence
- out = append(out, c)
- s = s[2:]
- } else if s[1] == 'x' || s[1] == 'X' {
- // hex escape, e.g. "\x80
- if len(s) < 4 {
- // too short to be valid
- out = append(out, s[:2]...)
- s = s[2:]
- continue
- }
- v, err := strconv.ParseUint(s[2:4], 16, 8)
- if err != nil {
- out = append(out, s[:4]...)
- } else {
- out = append(out, byte(v))
- }
- s = s[4:]
- } else if '0' <= s[1] && s[1] <= '7' {
- // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
- // so consume up to 2 more bytes or up to end-of-string
- n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
- if n > 3 {
- n = 3
- }
- v, err := strconv.ParseUint(s[1:1+n], 8, 8)
- if err != nil {
- out = append(out, s[:1+n]...)
- } else {
- out = append(out, byte(v))
- }
- s = s[1+n:]
- } else {
- // bad escape, just propagate the slash as-is
- out = append(out, s[0])
- s = s[1:]
- }
- }
-
- return string(out)
-}
-
-func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
- ccTypeName := ext.DescName()
-
- extObj := g.ObjectNamed(*ext.Extendee)
- var extDesc *Descriptor
- if id, ok := extObj.(*ImportedDescriptor); ok {
- // This is extending a publicly imported message.
- // We need the underlying type for goTag.
- extDesc = id.o.(*Descriptor)
- } else {
- extDesc = extObj.(*Descriptor)
- }
- extendedType := "*" + g.TypeName(extObj) // always use the original
- field := ext.FieldDescriptorProto
- fieldType, wireType := g.GoType(ext.parent, field)
- tag := g.goTag(extDesc, field, wireType)
- g.RecordTypeUse(*ext.Extendee)
- if n := ext.FieldDescriptorProto.TypeName; n != nil {
- // foreign extension type
- g.RecordTypeUse(*n)
- }
-
- typeName := ext.TypeName()
-
- // Special case for proto2 message sets: If this extension is extending
- // proto2.bridge.MessageSet, and its final name component is "message_set_extension",
- // then drop that last component.
- //
- // TODO: This should be implemented in the text formatter rather than the generator.
- // In addition, the situation for when to apply this special case is implemented
- // differently in other languages:
- // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560
- if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" {
- typeName = typeName[:len(typeName)-1]
- }
-
- // For text formatting, the package must be exactly what the .proto file declares,
- // ignoring overrides such as the go_package option, and with no dot/underscore mapping.
- extName := strings.Join(typeName, ".")
- if g.file.Package != nil {
- extName = *g.file.Package + "." + extName
- }
-
- g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
- g.P("ExtendedType: (", extendedType, ")(nil),")
- g.P("ExtensionType: (", fieldType, ")(nil),")
- g.P("Field: ", field.Number, ",")
- g.P(`Name: "`, extName, `",`)
- g.P("Tag: ", tag, ",")
- g.P(`Filename: "`, g.file.GetName(), `",`)
-
- g.P("}")
- g.P()
-
- g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
-
- g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
-}
-
-func (g *Generator) generateInitFunction() {
- if len(g.init) == 0 {
- return
- }
- g.P("func init() {")
- for _, l := range g.init {
- g.P(l)
- }
- g.P("}")
- g.init = nil
-}
-
-func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
- // Make a copy and trim source_code_info data.
- // TODO: Trim this more when we know exactly what we need.
- pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
- pb.SourceCodeInfo = nil
-
- b, err := proto.Marshal(pb)
- if err != nil {
- g.Fail(err.Error())
- }
-
- var buf bytes.Buffer
- w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
- w.Write(b)
- w.Close()
- b = buf.Bytes()
-
- v := file.VarName()
- g.P()
- g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
- g.P("var ", v, " = []byte{")
- g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
- for len(b) > 0 {
- n := 16
- if n > len(b) {
- n = len(b)
- }
-
- s := ""
- for _, c := range b[:n] {
- s += fmt.Sprintf("0x%02x,", c)
- }
- g.P(s)
-
- b = b[n:]
- }
- g.P("}")
-}
-
-func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
- // // We always print the full (proto-world) package name here.
- pkg := enum.File().GetPackage()
- if pkg != "" {
- pkg += "."
- }
- // The full type name
- typeName := enum.TypeName()
- // The full type name, CamelCased.
- ccTypeName := CamelCaseSlice(typeName)
- g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
-}
-
-// And now lots of helper functions.
-
-// Is c an ASCII lower-case letter?
-func isASCIILower(c byte) bool {
- return 'a' <= c && c <= 'z'
-}
-
-// Is c an ASCII digit?
-func isASCIIDigit(c byte) bool {
- return '0' <= c && c <= '9'
-}
-
-// CamelCase returns the CamelCased name.
-// If there is an interior underscore followed by a lower case letter,
-// drop the underscore and convert the letter to upper case.
-// There is a remote possibility of this rewrite causing a name collision,
-// but it's so remote we're prepared to pretend it's nonexistent - since the
-// C++ generator lowercases names, it's extremely unlikely to have two fields
-// with different capitalizations.
-// In short, _my_field_name_2 becomes XMyFieldName_2.
-func CamelCase(s string) string {
- if s == "" {
- return ""
- }
- t := make([]byte, 0, 32)
- i := 0
- if s[0] == '_' {
- // Need a capital letter; drop the '_'.
- t = append(t, 'X')
- i++
- }
- // Invariant: if the next letter is lower case, it must be converted
- // to upper case.
- // That is, we process a word at a time, where words are marked by _ or
- // upper case letter. Digits are treated as words.
- for ; i < len(s); i++ {
- c := s[i]
- if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
- continue // Skip the underscore in s.
- }
- if isASCIIDigit(c) {
- t = append(t, c)
- continue
- }
- // Assume we have a letter now - if not, it's a bogus identifier.
- // The next word is a sequence of characters that must start upper case.
- if isASCIILower(c) {
- c ^= ' ' // Make it a capital letter.
- }
- t = append(t, c) // Guaranteed not lower case.
- // Accept lower case sequence that follows.
- for i+1 < len(s) && isASCIILower(s[i+1]) {
- i++
- t = append(t, s[i])
- }
- }
- return string(t)
-}
-
-// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
-// be joined with "_".
-func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) }
-
-// dottedSlice turns a sliced name into a dotted name.
-func dottedSlice(elem []string) string { return strings.Join(elem, ".") }
-
-// Is this field optional?
-func isOptional(field *descriptor.FieldDescriptorProto) bool {
- return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL
-}
-
-// Is this field required?
-func isRequired(field *descriptor.FieldDescriptorProto) bool {
- return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED
-}
-
-// Is this field repeated?
-func isRepeated(field *descriptor.FieldDescriptorProto) bool {
- return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
-}
-
-// Is this field a scalar numeric type?
-func isScalar(field *descriptor.FieldDescriptorProto) bool {
- if field.Type == nil {
- return false
- }
- switch *field.Type {
- case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
- descriptor.FieldDescriptorProto_TYPE_FLOAT,
- descriptor.FieldDescriptorProto_TYPE_INT64,
- descriptor.FieldDescriptorProto_TYPE_UINT64,
- descriptor.FieldDescriptorProto_TYPE_INT32,
- descriptor.FieldDescriptorProto_TYPE_FIXED64,
- descriptor.FieldDescriptorProto_TYPE_FIXED32,
- descriptor.FieldDescriptorProto_TYPE_BOOL,
- descriptor.FieldDescriptorProto_TYPE_UINT32,
- descriptor.FieldDescriptorProto_TYPE_ENUM,
- descriptor.FieldDescriptorProto_TYPE_SFIXED32,
- descriptor.FieldDescriptorProto_TYPE_SFIXED64,
- descriptor.FieldDescriptorProto_TYPE_SINT32,
- descriptor.FieldDescriptorProto_TYPE_SINT64:
- return true
- default:
- return false
- }
-}
-
-// badToUnderscore is the mapping function used to generate Go names from package names,
-// which can be dotted in the input .proto file. It replaces non-identifier characters such as
-// dot or dash with underscore.
-func badToUnderscore(r rune) rune {
- if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
- return r
- }
- return '_'
-}
-
-// baseName returns the last path element of the name, with the last dotted suffix removed.
-func baseName(name string) string {
- // First, find the last element
- if i := strings.LastIndex(name, "/"); i >= 0 {
- name = name[i+1:]
- }
- // Now drop the suffix
- if i := strings.LastIndex(name, "."); i >= 0 {
- name = name[0:i]
- }
- return name
-}
-
-// The SourceCodeInfo message describes the location of elements of a parsed
-// .proto file by way of a "path", which is a sequence of integers that
-// describe the route from a FileDescriptorProto to the relevant submessage.
-// The path alternates between a field number of a repeated field, and an index
-// into that repeated field. The constants below define the field numbers that
-// are used.
-//
-// See descriptor.proto for more information about this.
-const (
- // tag numbers in FileDescriptorProto
- packagePath = 2 // package
- messagePath = 4 // message_type
- enumPath = 5 // enum_type
- // tag numbers in DescriptorProto
- messageFieldPath = 2 // field
- messageMessagePath = 3 // nested_type
- messageEnumPath = 4 // enum_type
- messageOneofPath = 8 // oneof_decl
- // tag numbers in EnumDescriptorProto
- enumValuePath = 2 // value
-)
-
-var supportTypeAliases bool
-
-func init() {
- for _, tag := range build.Default.ReleaseTags {
- if tag == "go1.9" {
- supportTypeAliases = true
- return
- }
- }
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
deleted file mode 100644
index a9b61036c..000000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package remap handles tracking the locations of Go tokens in a source text
-across a rewrite by the Go formatter.
-*/
-package remap
-
-import (
- "fmt"
- "go/scanner"
- "go/token"
-)
-
-// A Location represents a span of byte offsets in the source text.
-type Location struct {
- Pos, End int // End is exclusive
-}
-
-// A Map represents a mapping between token locations in an input source text
-// and locations in the correspnding output text.
-type Map map[Location]Location
-
-// Find reports whether the specified span is recorded by m, and if so returns
-// the new location it was mapped to. If the input span was not found, the
-// returned location is the same as the input.
-func (m Map) Find(pos, end int) (Location, bool) {
- key := Location{
- Pos: pos,
- End: end,
- }
- if loc, ok := m[key]; ok {
- return loc, true
- }
- return key, false
-}
-
-func (m Map) add(opos, oend, npos, nend int) {
- m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
-}
-
-// Compute constructs a location mapping from input to output. An error is
-// reported if any of the tokens of output cannot be mapped.
-func Compute(input, output []byte) (Map, error) {
- itok := tokenize(input)
- otok := tokenize(output)
- if len(itok) != len(otok) {
- return nil, fmt.Errorf("wrong number of tokens, %d ≠%d", len(itok), len(otok))
- }
- m := make(Map)
- for i, ti := range itok {
- to := otok[i]
- if ti.Token != to.Token {
- return nil, fmt.Errorf("token %d type mismatch: %s ≠%s", i+1, ti, to)
- }
- m.add(ti.pos, ti.end, to.pos, to.end)
- }
- return m, nil
-}
-
-// tokinfo records the span and type of a source token.
-type tokinfo struct {
- pos, end int
- token.Token
-}
-
-func tokenize(src []byte) []tokinfo {
- fs := token.NewFileSet()
- var s scanner.Scanner
- s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
- var info []tokinfo
- for {
- pos, next, lit := s.Scan()
- switch next {
- case token.SEMICOLON:
- continue
- }
- info = append(info, tokinfo{
- pos: int(pos - 1),
- end: int(pos + token.Pos(len(lit)) - 1),
- Token: next,
- })
- if next == token.EOF {
- break
- }
- }
- return info
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
deleted file mode 100644
index 61bfc10e0..000000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/compiler/plugin.proto
-
-/*
-Package plugin_go is a generated protocol buffer package.
-
-It is generated from these files:
- google/protobuf/compiler/plugin.proto
-
-It has these top-level messages:
- Version
- CodeGeneratorRequest
- CodeGeneratorResponse
-*/
-package plugin_go
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// The version number of protocol compiler.
-type Version struct {
- Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
- Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
- Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
- // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
- // be empty for mainline stable releases.
- Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Version) Reset() { *m = Version{} }
-func (m *Version) String() string { return proto.CompactTextString(m) }
-func (*Version) ProtoMessage() {}
-func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (m *Version) Unmarshal(b []byte) error {
- return xxx_messageInfo_Version.Unmarshal(m, b)
-}
-func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Version.Marshal(b, m, deterministic)
-}
-func (dst *Version) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Version.Merge(dst, src)
-}
-func (m *Version) XXX_Size() int {
- return xxx_messageInfo_Version.Size(m)
-}
-func (m *Version) XXX_DiscardUnknown() {
- xxx_messageInfo_Version.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Version proto.InternalMessageInfo
-
-func (m *Version) GetMajor() int32 {
- if m != nil && m.Major != nil {
- return *m.Major
- }
- return 0
-}
-
-func (m *Version) GetMinor() int32 {
- if m != nil && m.Minor != nil {
- return *m.Minor
- }
- return 0
-}
-
-func (m *Version) GetPatch() int32 {
- if m != nil && m.Patch != nil {
- return *m.Patch
- }
- return 0
-}
-
-func (m *Version) GetSuffix() string {
- if m != nil && m.Suffix != nil {
- return *m.Suffix
- }
- return ""
-}
-
-// An encoded CodeGeneratorRequest is written to the plugin's stdin.
-type CodeGeneratorRequest struct {
- // The .proto files that were explicitly listed on the command-line. The
- // code generator should generate code only for these files. Each file's
- // descriptor will be included in proto_file, below.
- FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
- // The generator parameter passed on the command-line.
- Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
- // FileDescriptorProtos for all files in files_to_generate and everything
- // they import. The files will appear in topological order, so each file
- // appears before any file that imports it.
- //
- // protoc guarantees that all proto_files will be written after
- // the fields above, even though this is not technically guaranteed by the
- // protobuf wire format. This theoretically could allow a plugin to stream
- // in the FileDescriptorProtos and handle them one by one rather than read
- // the entire set into memory at once. However, as of this writing, this
- // is not similarly optimized on protoc's end -- it will store all fields in
- // memory at once before sending them to the plugin.
- //
- // Type names of fields and extensions in the FileDescriptorProto are always
- // fully qualified.
- ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
- // The version number of protocol compiler.
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} }
-func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) }
-func (*CodeGeneratorRequest) ProtoMessage() {}
-func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
- return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
-}
-func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
-}
-func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
-}
-func (m *CodeGeneratorRequest) XXX_Size() int {
- return xxx_messageInfo_CodeGeneratorRequest.Size(m)
-}
-func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
-
-func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
- if m != nil {
- return m.FileToGenerate
- }
- return nil
-}
-
-func (m *CodeGeneratorRequest) GetParameter() string {
- if m != nil && m.Parameter != nil {
- return *m.Parameter
- }
- return ""
-}
-
-func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
- if m != nil {
- return m.ProtoFile
- }
- return nil
-}
-
-func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
- if m != nil {
- return m.CompilerVersion
- }
- return nil
-}
-
-// The plugin writes an encoded CodeGeneratorResponse to stdout.
-type CodeGeneratorResponse struct {
- // Error message. If non-empty, code generation failed. The plugin process
- // should exit with status code zero even if it reports an error in this way.
- //
- // This should be used to indicate errors in .proto files which prevent the
- // code generator from generating correct code. Errors which indicate a
- // problem in protoc itself -- such as the input CodeGeneratorRequest being
- // unparseable -- should be reported by writing a message to stderr and
- // exiting with a non-zero status code.
- Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
- File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} }
-func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) }
-func (*CodeGeneratorResponse) ProtoMessage() {}
-func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
- return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
-}
-func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
-}
-func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
-}
-func (m *CodeGeneratorResponse) XXX_Size() int {
- return xxx_messageInfo_CodeGeneratorResponse.Size(m)
-}
-func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
-
-func (m *CodeGeneratorResponse) GetError() string {
- if m != nil && m.Error != nil {
- return *m.Error
- }
- return ""
-}
-
-func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
- if m != nil {
- return m.File
- }
- return nil
-}
-
-// Represents a single generated file.
-type CodeGeneratorResponse_File struct {
- // The file name, relative to the output directory. The name must not
- // contain "." or ".." components and must be relative, not be absolute (so,
- // the file cannot lie outside the output directory). "/" must be used as
- // the path separator, not "\".
- //
- // If the name is omitted, the content will be appended to the previous
- // file. This allows the generator to break large files into small chunks,
- // and allows the generated text to be streamed back to protoc so that large
- // files need not reside completely in memory at one time. Note that as of
- // this writing protoc does not optimize for this -- it will read the entire
- // CodeGeneratorResponse before writing files to disk.
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- // If non-empty, indicates that the named file should already exist, and the
- // content here is to be inserted into that file at a defined insertion
- // point. This feature allows a code generator to extend the output
- // produced by another code generator. The original generator may provide
- // insertion points by placing special annotations in the file that look
- // like:
- // @@protoc_insertion_point(NAME)
- // The annotation can have arbitrary text before and after it on the line,
- // which allows it to be placed in a comment. NAME should be replaced with
- // an identifier naming the point -- this is what other generators will use
- // as the insertion_point. Code inserted at this point will be placed
- // immediately above the line containing the insertion point (thus multiple
- // insertions to the same point will come out in the order they were added).
- // The double-@ is intended to make it unlikely that the generated code
- // could contain things that look like insertion points by accident.
- //
- // For example, the C++ code generator places the following line in the
- // .pb.h files that it generates:
- // // @@protoc_insertion_point(namespace_scope)
- // This line appears within the scope of the file's package namespace, but
- // outside of any particular class. Another plugin can then specify the
- // insertion_point "namespace_scope" to generate additional classes or
- // other declarations that should be placed in this scope.
- //
- // Note that if the line containing the insertion point begins with
- // whitespace, the same whitespace will be added to every line of the
- // inserted text. This is useful for languages like Python, where
- // indentation matters. In these languages, the insertion point comment
- // should be indented the same amount as any inserted code will need to be
- // in order to work correctly in that context.
- //
- // The code generator that generates the initial file and the one which
- // inserts into it must both run as part of a single invocation of protoc.
- // Code generators are executed in the order in which they appear on the
- // command line.
- //
- // If |insertion_point| is present, |name| must also be present.
- InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
- // The file contents.
- Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} }
-func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) }
-func (*CodeGeneratorResponse_File) ProtoMessage() {}
-func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
-func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
- return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
-}
-func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
-}
-func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
-}
-func (m *CodeGeneratorResponse_File) XXX_Size() int {
- return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
-}
-func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
- xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
-
-func (m *CodeGeneratorResponse_File) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
- if m != nil && m.InsertionPoint != nil {
- return *m.InsertionPoint
- }
- return ""
-}
-
-func (m *CodeGeneratorResponse_File) GetContent() string {
- if m != nil && m.Content != nil {
- return *m.Content
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
- proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
- proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
- proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
-}
-
-func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 417 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
- 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
- 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
- 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
- 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
- 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
- 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
- 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
- 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
- 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
- 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
- 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
- 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
- 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
- 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
- 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
- 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
- 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
- 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
- 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
- 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
- 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
- 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
- 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
- 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
- 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
- 0x00,
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
deleted file mode 100644
index 8953d0ff8..000000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
+++ /dev/null
@@ -1,83 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: google/protobuf/compiler/plugin.proto
-// DO NOT EDIT!
-
-package google_protobuf_compiler
-
-import proto "github.com/golang/protobuf/proto"
-import "math"
-import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
-
-// Reference proto and math imports to suppress error if they are not otherwise used.
-var _ = proto.GetString
-var _ = math.Inf
-
-type CodeGeneratorRequest struct {
- FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
- Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
- ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} }
-func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
-func (*CodeGeneratorRequest) ProtoMessage() {}
-
-func (this *CodeGeneratorRequest) GetParameter() string {
- if this != nil && this.Parameter != nil {
- return *this.Parameter
- }
- return ""
-}
-
-type CodeGeneratorResponse struct {
- Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
- File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} }
-func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
-func (*CodeGeneratorResponse) ProtoMessage() {}
-
-func (this *CodeGeneratorResponse) GetError() string {
- if this != nil && this.Error != nil {
- return *this.Error
- }
- return ""
-}
-
-type CodeGeneratorResponse_File struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
- Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} }
-func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
-func (*CodeGeneratorResponse_File) ProtoMessage() {}
-
-func (this *CodeGeneratorResponse_File) GetName() string {
- if this != nil && this.Name != nil {
- return *this.Name
- }
- return ""
-}
-
-func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
- if this != nil && this.InsertionPoint != nil {
- return *this.InsertionPoint
- }
- return ""
-}
-
-func (this *CodeGeneratorResponse_File) GetContent() string {
- if this != nil && this.Content != nil {
- return *this.Content
- }
- return ""
-}
-
-func init() {
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
deleted file mode 100644
index 5b5574529..000000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
+++ /dev/null
@@ -1,167 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Author: kenton@google.com (Kenton Varda)
-//
-// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
-// change.
-//
-// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
-// just a program that reads a CodeGeneratorRequest from stdin and writes a
-// CodeGeneratorResponse to stdout.
-//
-// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
-// of dealing with the raw protocol defined here.
-//
-// A plugin executable needs only to be placed somewhere in the path. The
-// plugin should be named "protoc-gen-$NAME", and will then be used when the
-// flag "--${NAME}_out" is passed to protoc.
-
-syntax = "proto2";
-package google.protobuf.compiler;
-option java_package = "com.google.protobuf.compiler";
-option java_outer_classname = "PluginProtos";
-
-option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
-
-import "google/protobuf/descriptor.proto";
-
-// The version number of protocol compiler.
-message Version {
- optional int32 major = 1;
- optional int32 minor = 2;
- optional int32 patch = 3;
- // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
- // be empty for mainline stable releases.
- optional string suffix = 4;
-}
-
-// An encoded CodeGeneratorRequest is written to the plugin's stdin.
-message CodeGeneratorRequest {
- // The .proto files that were explicitly listed on the command-line. The
- // code generator should generate code only for these files. Each file's
- // descriptor will be included in proto_file, below.
- repeated string file_to_generate = 1;
-
- // The generator parameter passed on the command-line.
- optional string parameter = 2;
-
- // FileDescriptorProtos for all files in files_to_generate and everything
- // they import. The files will appear in topological order, so each file
- // appears before any file that imports it.
- //
- // protoc guarantees that all proto_files will be written after
- // the fields above, even though this is not technically guaranteed by the
- // protobuf wire format. This theoretically could allow a plugin to stream
- // in the FileDescriptorProtos and handle them one by one rather than read
- // the entire set into memory at once. However, as of this writing, this
- // is not similarly optimized on protoc's end -- it will store all fields in
- // memory at once before sending them to the plugin.
- //
- // Type names of fields and extensions in the FileDescriptorProto are always
- // fully qualified.
- repeated FileDescriptorProto proto_file = 15;
-
- // The version number of protocol compiler.
- optional Version compiler_version = 3;
-
-}
-
-// The plugin writes an encoded CodeGeneratorResponse to stdout.
-message CodeGeneratorResponse {
- // Error message. If non-empty, code generation failed. The plugin process
- // should exit with status code zero even if it reports an error in this way.
- //
- // This should be used to indicate errors in .proto files which prevent the
- // code generator from generating correct code. Errors which indicate a
- // problem in protoc itself -- such as the input CodeGeneratorRequest being
- // unparseable -- should be reported by writing a message to stderr and
- // exiting with a non-zero status code.
- optional string error = 1;
-
- // Represents a single generated file.
- message File {
- // The file name, relative to the output directory. The name must not
- // contain "." or ".." components and must be relative, not be absolute (so,
- // the file cannot lie outside the output directory). "/" must be used as
- // the path separator, not "\".
- //
- // If the name is omitted, the content will be appended to the previous
- // file. This allows the generator to break large files into small chunks,
- // and allows the generated text to be streamed back to protoc so that large
- // files need not reside completely in memory at one time. Note that as of
- // this writing protoc does not optimize for this -- it will read the entire
- // CodeGeneratorResponse before writing files to disk.
- optional string name = 1;
-
- // If non-empty, indicates that the named file should already exist, and the
- // content here is to be inserted into that file at a defined insertion
- // point. This feature allows a code generator to extend the output
- // produced by another code generator. The original generator may provide
- // insertion points by placing special annotations in the file that look
- // like:
- // @@protoc_insertion_point(NAME)
- // The annotation can have arbitrary text before and after it on the line,
- // which allows it to be placed in a comment. NAME should be replaced with
- // an identifier naming the point -- this is what other generators will use
- // as the insertion_point. Code inserted at this point will be placed
- // immediately above the line containing the insertion point (thus multiple
- // insertions to the same point will come out in the order they were added).
- // The double-@ is intended to make it unlikely that the generated code
- // could contain things that look like insertion points by accident.
- //
- // For example, the C++ code generator places the following line in the
- // .pb.h files that it generates:
- // // @@protoc_insertion_point(namespace_scope)
- // This line appears within the scope of the file's package namespace, but
- // outside of any particular class. Another plugin can then specify the
- // insertion_point "namespace_scope" to generate additional classes or
- // other declarations that should be placed in this scope.
- //
- // Note that if the line containing the insertion point begins with
- // whitespace, the same whitespace will be added to every line of the
- // inserted text. This is useful for languages like Python, where
- // indentation matters. In these languages, the insertion point comment
- // should be indented the same amount as any inserted code will need to be
- // in order to work correctly in that context.
- //
- // The code generator that generates the initial file and the one which
- // inserts into it must both run as part of a single invocation of protoc.
- // Code generators are executed in the order in which they appear on the
- // command line.
- //
- // If |insertion_point| is present, |name| must also be present.
- optional string insertion_point = 2;
-
- // The file contents.
- optional string content = 15;
- }
- repeated File file = 15;
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
deleted file mode 100644
index 33daa73dd..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/struct.proto
-
-package structpb
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// `NullValue` is a singleton enumeration to represent the null value for the
-// `Value` type union.
-//
-// The JSON representation for `NullValue` is JSON `null`.
-type NullValue int32
-
-const (
- // Null value.
- NullValue_NULL_VALUE NullValue = 0
-)
-
-var NullValue_name = map[int32]string{
- 0: "NULL_VALUE",
-}
-
-var NullValue_value = map[string]int32{
- "NULL_VALUE": 0,
-}
-
-func (x NullValue) String() string {
- return proto.EnumName(NullValue_name, int32(x))
-}
-
-func (NullValue) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_df322afd6c9fb402, []int{0}
-}
-
-func (NullValue) XXX_WellKnownType() string { return "NullValue" }
-
-// `Struct` represents a structured data value, consisting of fields
-// which map to dynamically typed values. In some languages, `Struct`
-// might be supported by a native representation. For example, in
-// scripting languages like JS a struct is represented as an
-// object. The details of that representation are described together
-// with the proto support for the language.
-//
-// The JSON representation for `Struct` is JSON object.
-type Struct struct {
- // Unordered map of dynamically typed values.
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Struct) Reset() { *m = Struct{} }
-func (m *Struct) String() string { return proto.CompactTextString(m) }
-func (*Struct) ProtoMessage() {}
-func (*Struct) Descriptor() ([]byte, []int) {
- return fileDescriptor_df322afd6c9fb402, []int{0}
-}
-
-func (*Struct) XXX_WellKnownType() string { return "Struct" }
-
-func (m *Struct) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Struct.Unmarshal(m, b)
-}
-func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
-}
-func (m *Struct) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Struct.Merge(m, src)
-}
-func (m *Struct) XXX_Size() int {
- return xxx_messageInfo_Struct.Size(m)
-}
-func (m *Struct) XXX_DiscardUnknown() {
- xxx_messageInfo_Struct.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Struct proto.InternalMessageInfo
-
-func (m *Struct) GetFields() map[string]*Value {
- if m != nil {
- return m.Fields
- }
- return nil
-}
-
-// `Value` represents a dynamically typed value which can be either
-// null, a number, a string, a boolean, a recursive struct value, or a
-// list of values. A producer of value is expected to set one of that
-// variants, absence of any variant indicates an error.
-//
-// The JSON representation for `Value` is JSON value.
-type Value struct {
- // The kind of value.
- //
- // Types that are valid to be assigned to Kind:
- // *Value_NullValue
- // *Value_NumberValue
- // *Value_StringValue
- // *Value_BoolValue
- // *Value_StructValue
- // *Value_ListValue
- Kind isValue_Kind `protobuf_oneof:"kind"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Value) Reset() { *m = Value{} }
-func (m *Value) String() string { return proto.CompactTextString(m) }
-func (*Value) ProtoMessage() {}
-func (*Value) Descriptor() ([]byte, []int) {
- return fileDescriptor_df322afd6c9fb402, []int{1}
-}
-
-func (*Value) XXX_WellKnownType() string { return "Value" }
-
-func (m *Value) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Value.Unmarshal(m, b)
-}
-func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Value.Marshal(b, m, deterministic)
-}
-func (m *Value) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Value.Merge(m, src)
-}
-func (m *Value) XXX_Size() int {
- return xxx_messageInfo_Value.Size(m)
-}
-func (m *Value) XXX_DiscardUnknown() {
- xxx_messageInfo_Value.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Value proto.InternalMessageInfo
-
-type isValue_Kind interface {
- isValue_Kind()
-}
-
-type Value_NullValue struct {
- NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
-}
-
-type Value_NumberValue struct {
- NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
-}
-
-type Value_StringValue struct {
- StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
-}
-
-type Value_BoolValue struct {
- BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
-}
-
-type Value_StructValue struct {
- StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
-}
-
-type Value_ListValue struct {
- ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
-}
-
-func (*Value_NullValue) isValue_Kind() {}
-
-func (*Value_NumberValue) isValue_Kind() {}
-
-func (*Value_StringValue) isValue_Kind() {}
-
-func (*Value_BoolValue) isValue_Kind() {}
-
-func (*Value_StructValue) isValue_Kind() {}
-
-func (*Value_ListValue) isValue_Kind() {}
-
-func (m *Value) GetKind() isValue_Kind {
- if m != nil {
- return m.Kind
- }
- return nil
-}
-
-func (m *Value) GetNullValue() NullValue {
- if x, ok := m.GetKind().(*Value_NullValue); ok {
- return x.NullValue
- }
- return NullValue_NULL_VALUE
-}
-
-func (m *Value) GetNumberValue() float64 {
- if x, ok := m.GetKind().(*Value_NumberValue); ok {
- return x.NumberValue
- }
- return 0
-}
-
-func (m *Value) GetStringValue() string {
- if x, ok := m.GetKind().(*Value_StringValue); ok {
- return x.StringValue
- }
- return ""
-}
-
-func (m *Value) GetBoolValue() bool {
- if x, ok := m.GetKind().(*Value_BoolValue); ok {
- return x.BoolValue
- }
- return false
-}
-
-func (m *Value) GetStructValue() *Struct {
- if x, ok := m.GetKind().(*Value_StructValue); ok {
- return x.StructValue
- }
- return nil
-}
-
-func (m *Value) GetListValue() *ListValue {
- if x, ok := m.GetKind().(*Value_ListValue); ok {
- return x.ListValue
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Value) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Value_NullValue)(nil),
- (*Value_NumberValue)(nil),
- (*Value_StringValue)(nil),
- (*Value_BoolValue)(nil),
- (*Value_StructValue)(nil),
- (*Value_ListValue)(nil),
- }
-}
-
-// `ListValue` is a wrapper around a repeated field of values.
-//
-// The JSON representation for `ListValue` is JSON array.
-type ListValue struct {
- // Repeated field of dynamically typed values.
- Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ListValue) Reset() { *m = ListValue{} }
-func (m *ListValue) String() string { return proto.CompactTextString(m) }
-func (*ListValue) ProtoMessage() {}
-func (*ListValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_df322afd6c9fb402, []int{2}
-}
-
-func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
-
-func (m *ListValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ListValue.Unmarshal(m, b)
-}
-func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
-}
-func (m *ListValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ListValue.Merge(m, src)
-}
-func (m *ListValue) XXX_Size() int {
- return xxx_messageInfo_ListValue.Size(m)
-}
-func (m *ListValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ListValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ListValue proto.InternalMessageInfo
-
-func (m *ListValue) GetValues() []*Value {
- if m != nil {
- return m.Values
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
- proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
- proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
- proto.RegisterType((*Value)(nil), "google.protobuf.Value")
- proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
-}
-
-func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
-
-var fileDescriptor_df322afd6c9fb402 = []byte{
- // 417 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
- 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
- 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
- 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
- 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
- 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
- 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
- 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
- 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
- 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
- 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
- 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
- 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
- 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
- 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
- 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
- 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
- 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
- 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
- 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
- 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
- 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
- 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
- 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
- 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
- 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
- 0x00,
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
deleted file mode 100644
index 7d7808e7f..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
+++ /dev/null
@@ -1,96 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "StructProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-
-// `Struct` represents a structured data value, consisting of fields
-// which map to dynamically typed values. In some languages, `Struct`
-// might be supported by a native representation. For example, in
-// scripting languages like JS a struct is represented as an
-// object. The details of that representation are described together
-// with the proto support for the language.
-//
-// The JSON representation for `Struct` is JSON object.
-message Struct {
- // Unordered map of dynamically typed values.
- map fields = 1;
-}
-
-// `Value` represents a dynamically typed value which can be either
-// null, a number, a string, a boolean, a recursive struct value, or a
-// list of values. A producer of value is expected to set one of that
-// variants, absence of any variant indicates an error.
-//
-// The JSON representation for `Value` is JSON value.
-message Value {
- // The kind of value.
- oneof kind {
- // Represents a null value.
- NullValue null_value = 1;
- // Represents a double value.
- double number_value = 2;
- // Represents a string value.
- string string_value = 3;
- // Represents a boolean value.
- bool bool_value = 4;
- // Represents a structured value.
- Struct struct_value = 5;
- // Represents a repeated `Value`.
- ListValue list_value = 6;
- }
-}
-
-// `NullValue` is a singleton enumeration to represent the null value for the
-// `Value` type union.
-//
-// The JSON representation for `NullValue` is JSON `null`.
-enum NullValue {
- // Null value.
- NULL_VALUE = 0;
-}
-
-// `ListValue` is a wrapper around a repeated field of values.
-//
-// The JSON representation for `ListValue` is JSON array.
-message ListValue {
- // Repeated field of dynamically typed values.
- repeated Value values = 1;
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
deleted file mode 100644
index add19a1ad..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/wrappers.proto
-
-package wrappers
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// Wrapper message for `double`.
-//
-// The JSON representation for `DoubleValue` is JSON number.
-type DoubleValue struct {
- // The double value.
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DoubleValue) Reset() { *m = DoubleValue{} }
-func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
-func (*DoubleValue) ProtoMessage() {}
-func (*DoubleValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{0}
-}
-
-func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
-
-func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
-}
-func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
-}
-func (m *DoubleValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DoubleValue.Merge(m, src)
-}
-func (m *DoubleValue) XXX_Size() int {
- return xxx_messageInfo_DoubleValue.Size(m)
-}
-func (m *DoubleValue) XXX_DiscardUnknown() {
- xxx_messageInfo_DoubleValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
-
-func (m *DoubleValue) GetValue() float64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-// Wrapper message for `float`.
-//
-// The JSON representation for `FloatValue` is JSON number.
-type FloatValue struct {
- // The float value.
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FloatValue) Reset() { *m = FloatValue{} }
-func (m *FloatValue) String() string { return proto.CompactTextString(m) }
-func (*FloatValue) ProtoMessage() {}
-func (*FloatValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{1}
-}
-
-func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
-
-func (m *FloatValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FloatValue.Unmarshal(m, b)
-}
-func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
-}
-func (m *FloatValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FloatValue.Merge(m, src)
-}
-func (m *FloatValue) XXX_Size() int {
- return xxx_messageInfo_FloatValue.Size(m)
-}
-func (m *FloatValue) XXX_DiscardUnknown() {
- xxx_messageInfo_FloatValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FloatValue proto.InternalMessageInfo
-
-func (m *FloatValue) GetValue() float32 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-// Wrapper message for `int64`.
-//
-// The JSON representation for `Int64Value` is JSON string.
-type Int64Value struct {
- // The int64 value.
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Int64Value) Reset() { *m = Int64Value{} }
-func (m *Int64Value) String() string { return proto.CompactTextString(m) }
-func (*Int64Value) ProtoMessage() {}
-func (*Int64Value) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{2}
-}
-
-func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
-
-func (m *Int64Value) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Int64Value.Unmarshal(m, b)
-}
-func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
-}
-func (m *Int64Value) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Int64Value.Merge(m, src)
-}
-func (m *Int64Value) XXX_Size() int {
- return xxx_messageInfo_Int64Value.Size(m)
-}
-func (m *Int64Value) XXX_DiscardUnknown() {
- xxx_messageInfo_Int64Value.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Int64Value proto.InternalMessageInfo
-
-func (m *Int64Value) GetValue() int64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-// Wrapper message for `uint64`.
-//
-// The JSON representation for `UInt64Value` is JSON string.
-type UInt64Value struct {
- // The uint64 value.
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UInt64Value) Reset() { *m = UInt64Value{} }
-func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
-func (*UInt64Value) ProtoMessage() {}
-func (*UInt64Value) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{3}
-}
-
-func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
-
-func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
-}
-func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
-}
-func (m *UInt64Value) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UInt64Value.Merge(m, src)
-}
-func (m *UInt64Value) XXX_Size() int {
- return xxx_messageInfo_UInt64Value.Size(m)
-}
-func (m *UInt64Value) XXX_DiscardUnknown() {
- xxx_messageInfo_UInt64Value.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
-
-func (m *UInt64Value) GetValue() uint64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-// Wrapper message for `int32`.
-//
-// The JSON representation for `Int32Value` is JSON number.
-type Int32Value struct {
- // The int32 value.
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Int32Value) Reset() { *m = Int32Value{} }
-func (m *Int32Value) String() string { return proto.CompactTextString(m) }
-func (*Int32Value) ProtoMessage() {}
-func (*Int32Value) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{4}
-}
-
-func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
-
-func (m *Int32Value) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Int32Value.Unmarshal(m, b)
-}
-func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
-}
-func (m *Int32Value) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Int32Value.Merge(m, src)
-}
-func (m *Int32Value) XXX_Size() int {
- return xxx_messageInfo_Int32Value.Size(m)
-}
-func (m *Int32Value) XXX_DiscardUnknown() {
- xxx_messageInfo_Int32Value.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Int32Value proto.InternalMessageInfo
-
-func (m *Int32Value) GetValue() int32 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-// Wrapper message for `uint32`.
-//
-// The JSON representation for `UInt32Value` is JSON number.
-type UInt32Value struct {
- // The uint32 value.
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UInt32Value) Reset() { *m = UInt32Value{} }
-func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
-func (*UInt32Value) ProtoMessage() {}
-func (*UInt32Value) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{5}
-}
-
-func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
-
-func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
-}
-func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
-}
-func (m *UInt32Value) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UInt32Value.Merge(m, src)
-}
-func (m *UInt32Value) XXX_Size() int {
- return xxx_messageInfo_UInt32Value.Size(m)
-}
-func (m *UInt32Value) XXX_DiscardUnknown() {
- xxx_messageInfo_UInt32Value.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
-
-func (m *UInt32Value) GetValue() uint32 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-// Wrapper message for `bool`.
-//
-// The JSON representation for `BoolValue` is JSON `true` and `false`.
-type BoolValue struct {
- // The bool value.
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BoolValue) Reset() { *m = BoolValue{} }
-func (m *BoolValue) String() string { return proto.CompactTextString(m) }
-func (*BoolValue) ProtoMessage() {}
-func (*BoolValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{6}
-}
-
-func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
-
-func (m *BoolValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BoolValue.Unmarshal(m, b)
-}
-func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
-}
-func (m *BoolValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BoolValue.Merge(m, src)
-}
-func (m *BoolValue) XXX_Size() int {
- return xxx_messageInfo_BoolValue.Size(m)
-}
-func (m *BoolValue) XXX_DiscardUnknown() {
- xxx_messageInfo_BoolValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BoolValue proto.InternalMessageInfo
-
-func (m *BoolValue) GetValue() bool {
- if m != nil {
- return m.Value
- }
- return false
-}
-
-// Wrapper message for `string`.
-//
-// The JSON representation for `StringValue` is JSON string.
-type StringValue struct {
- // The string value.
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StringValue) Reset() { *m = StringValue{} }
-func (m *StringValue) String() string { return proto.CompactTextString(m) }
-func (*StringValue) ProtoMessage() {}
-func (*StringValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{7}
-}
-
-func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
-
-func (m *StringValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StringValue.Unmarshal(m, b)
-}
-func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
-}
-func (m *StringValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StringValue.Merge(m, src)
-}
-func (m *StringValue) XXX_Size() int {
- return xxx_messageInfo_StringValue.Size(m)
-}
-func (m *StringValue) XXX_DiscardUnknown() {
- xxx_messageInfo_StringValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StringValue proto.InternalMessageInfo
-
-func (m *StringValue) GetValue() string {
- if m != nil {
- return m.Value
- }
- return ""
-}
-
-// Wrapper message for `bytes`.
-//
-// The JSON representation for `BytesValue` is JSON string.
-type BytesValue struct {
- // The bytes value.
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BytesValue) Reset() { *m = BytesValue{} }
-func (m *BytesValue) String() string { return proto.CompactTextString(m) }
-func (*BytesValue) ProtoMessage() {}
-func (*BytesValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_5377b62bda767935, []int{8}
-}
-
-func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
-
-func (m *BytesValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BytesValue.Unmarshal(m, b)
-}
-func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
-}
-func (m *BytesValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BytesValue.Merge(m, src)
-}
-func (m *BytesValue) XXX_Size() int {
- return xxx_messageInfo_BytesValue.Size(m)
-}
-func (m *BytesValue) XXX_DiscardUnknown() {
- xxx_messageInfo_BytesValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BytesValue proto.InternalMessageInfo
-
-func (m *BytesValue) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
- proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
- proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
- proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
- proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
- proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
- proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
- proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
- proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
-}
-
-func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) }
-
-var fileDescriptor_5377b62bda767935 = []byte{
- // 259 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
- 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
- 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
- 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
- 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
- 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
- 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
- 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
- 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
- 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
- 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
- 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
- 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
- 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
- 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
- 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
deleted file mode 100644
index 01947639a..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
+++ /dev/null
@@ -1,118 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Wrappers for primitive (non-message) types. These types are useful
-// for embedding primitives in the `google.protobuf.Any` type and for places
-// where we need to distinguish between the absence of a primitive
-// typed field and its default value.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/wrappers";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "WrappersProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// Wrapper message for `double`.
-//
-// The JSON representation for `DoubleValue` is JSON number.
-message DoubleValue {
- // The double value.
- double value = 1;
-}
-
-// Wrapper message for `float`.
-//
-// The JSON representation for `FloatValue` is JSON number.
-message FloatValue {
- // The float value.
- float value = 1;
-}
-
-// Wrapper message for `int64`.
-//
-// The JSON representation for `Int64Value` is JSON string.
-message Int64Value {
- // The int64 value.
- int64 value = 1;
-}
-
-// Wrapper message for `uint64`.
-//
-// The JSON representation for `UInt64Value` is JSON string.
-message UInt64Value {
- // The uint64 value.
- uint64 value = 1;
-}
-
-// Wrapper message for `int32`.
-//
-// The JSON representation for `Int32Value` is JSON number.
-message Int32Value {
- // The int32 value.
- int32 value = 1;
-}
-
-// Wrapper message for `uint32`.
-//
-// The JSON representation for `UInt32Value` is JSON number.
-message UInt32Value {
- // The uint32 value.
- uint32 value = 1;
-}
-
-// Wrapper message for `bool`.
-//
-// The JSON representation for `BoolValue` is JSON `true` and `false`.
-message BoolValue {
- // The bool value.
- bool value = 1;
-}
-
-// Wrapper message for `string`.
-//
-// The JSON representation for `StringValue` is JSON string.
-message StringValue {
- // The string value.
- string value = 1;
-}
-
-// Wrapper message for `bytes`.
-//
-// The JSON representation for `BytesValue` is JSON string.
-message BytesValue {
- // The bytes value.
- bytes value = 1;
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
deleted file mode 100644
index 364516251..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2015, Gengo, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name of Gengo, Inc. nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
deleted file mode 100644
index 76cafe6ec..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
+++ /dev/null
@@ -1,22 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-
-package(default_visibility = ["//visibility:public"])
-
-proto_library(
- name = "internal_proto",
- srcs = ["stream_chunk.proto"],
- deps = ["@com_google_protobuf//:any_proto"],
-)
-
-go_proto_library(
- name = "internal_go_proto",
- importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
- proto = ":internal_proto",
-)
-
-go_library(
- name = "go_default_library",
- embed = [":internal_go_proto"],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
deleted file mode 100644
index 8858f0690..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: internal/stream_chunk.proto
-
-package internal
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import any "github.com/golang/protobuf/ptypes/any"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// StreamError is a response type which is returned when
-// streaming rpc returns an error.
-type StreamError struct {
- GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
- HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
- Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
- HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
- Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StreamError) Reset() { *m = StreamError{} }
-func (m *StreamError) String() string { return proto.CompactTextString(m) }
-func (*StreamError) ProtoMessage() {}
-func (*StreamError) Descriptor() ([]byte, []int) {
- return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0}
-}
-func (m *StreamError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StreamError.Unmarshal(m, b)
-}
-func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
-}
-func (dst *StreamError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StreamError.Merge(dst, src)
-}
-func (m *StreamError) XXX_Size() int {
- return xxx_messageInfo_StreamError.Size(m)
-}
-func (m *StreamError) XXX_DiscardUnknown() {
- xxx_messageInfo_StreamError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StreamError proto.InternalMessageInfo
-
-func (m *StreamError) GetGrpcCode() int32 {
- if m != nil {
- return m.GrpcCode
- }
- return 0
-}
-
-func (m *StreamError) GetHttpCode() int32 {
- if m != nil {
- return m.HttpCode
- }
- return 0
-}
-
-func (m *StreamError) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *StreamError) GetHttpStatus() string {
- if m != nil {
- return m.HttpStatus
- }
- return ""
-}
-
-func (m *StreamError) GetDetails() []*any.Any {
- if m != nil {
- return m.Details
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
-}
-
-func init() {
- proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7)
-}
-
-var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{
- // 223 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30,
- 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23,
- 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78,
- 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce,
- 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2,
- 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a,
- 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f,
- 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54,
- 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7,
- 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5,
- 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9,
- 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 0x7a, 0x59, 0xac,
- 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18,
- 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
deleted file mode 100644
index 55f42ce63..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
+++ /dev/null
@@ -1,15 +0,0 @@
-syntax = "proto3";
-package grpc.gateway.runtime;
-option go_package = "internal";
-
-import "google/protobuf/any.proto";
-
-// StreamError is a response type which is returned when
-// streaming rpc returns an error.
-message StreamError {
- int32 grpc_code = 1;
- int32 http_code = 2;
- string message = 3;
- string http_status = 4;
- repeated google.protobuf.Any details = 5;
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
deleted file mode 100644
index c99f83e58..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
+++ /dev/null
@@ -1,80 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(default_visibility = ["//visibility:public"])
-
-go_library(
- name = "go_default_library",
- srcs = [
- "context.go",
- "convert.go",
- "doc.go",
- "errors.go",
- "fieldmask.go",
- "handler.go",
- "marshal_json.go",
- "marshal_jsonpb.go",
- "marshal_proto.go",
- "marshaler.go",
- "marshaler_registry.go",
- "mux.go",
- "pattern.go",
- "proto2_convert.go",
- "proto_errors.go",
- "query.go",
- ],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
- deps = [
- "//internal:go_default_library",
- "//utilities:go_default_library",
- "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
- "@com_github_golang_protobuf//proto:go_default_library",
- "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen",
- "@io_bazel_rules_go//proto/wkt:any_go_proto",
- "@io_bazel_rules_go//proto/wkt:duration_go_proto",
- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
- "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
- "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//grpclog:go_default_library",
- "@org_golang_google_grpc//metadata:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "context_test.go",
- "errors_test.go",
- "fieldmask_test.go",
- "handler_test.go",
- "marshal_json_test.go",
- "marshal_jsonpb_test.go",
- "marshal_proto_test.go",
- "marshaler_registry_test.go",
- "mux_test.go",
- "pattern_test.go",
- "query_test.go",
- ],
- embed = [":go_default_library"],
- deps = [
- "//examples/proto/examplepb:go_default_library",
- "//internal:go_default_library",
- "//utilities:go_default_library",
- "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
- "@com_github_golang_protobuf//proto:go_default_library",
- "@com_github_golang_protobuf//ptypes:go_default_library_gen",
- "@go_googleapis//google/rpc:errdetails_go_proto",
- "@io_bazel_rules_go//proto/wkt:duration_go_proto",
- "@io_bazel_rules_go//proto/wkt:empty_go_proto",
- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
- "@io_bazel_rules_go//proto/wkt:struct_go_proto",
- "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
- "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
- "@org_golang_google_grpc//:go_default_library",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//metadata:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
- ],
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
deleted file mode 100644
index 896057e1e..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package runtime
-
-import (
- "context"
- "encoding/base64"
- "fmt"
- "net"
- "net/http"
- "net/textproto"
- "strconv"
- "strings"
- "time"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// MetadataHeaderPrefix is the http prefix that represents custom metadata
-// parameters to or from a gRPC call.
-const MetadataHeaderPrefix = "Grpc-Metadata-"
-
-// MetadataPrefix is prepended to permanent HTTP header keys (as specified
-// by the IANA) when added to the gRPC context.
-const MetadataPrefix = "grpcgateway-"
-
-// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
-// HTTP headers in a response handled by grpc-gateway
-const MetadataTrailerPrefix = "Grpc-Trailer-"
-
-const metadataGrpcTimeout = "Grpc-Timeout"
-const metadataHeaderBinarySuffix = "-Bin"
-
-const xForwardedFor = "X-Forwarded-For"
-const xForwardedHost = "X-Forwarded-Host"
-
-var (
- // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
- // header isn't present. If the value is 0 the sent `context` will not have a timeout.
- DefaultContextTimeout = 0 * time.Second
-)
-
-func decodeBinHeader(v string) ([]byte, error) {
- if len(v)%4 == 0 {
- // Input was padded, or padding was not necessary.
- return base64.StdEncoding.DecodeString(v)
- }
- return base64.RawStdEncoding.DecodeString(v)
-}
-
-/*
-AnnotateContext adds context information such as metadata from the request.
-
-At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
-except that the forwarded destination is not another HTTP service but rather
-a gRPC service.
-*/
-func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
- var pairs []string
- timeout := DefaultContextTimeout
- if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
- var err error
- timeout, err = timeoutDecode(tm)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
- }
- }
-
- for key, vals := range req.Header {
- for _, val := range vals {
- key = textproto.CanonicalMIMEHeaderKey(key)
- // For backwards-compatibility, pass through 'authorization' header with no prefix.
- if key == "Authorization" {
- pairs = append(pairs, "authorization", val)
- }
- if h, ok := mux.incomingHeaderMatcher(key); ok {
- // Handles "-bin" metadata in grpc, since grpc will do another base64
- // encode before sending to server, we need to decode it first.
- if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
- b, err := decodeBinHeader(val)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
- }
-
- val = string(b)
- }
- pairs = append(pairs, h, val)
- }
- }
- }
- if host := req.Header.Get(xForwardedHost); host != "" {
- pairs = append(pairs, strings.ToLower(xForwardedHost), host)
- } else if req.Host != "" {
- pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
- }
-
- if addr := req.RemoteAddr; addr != "" {
- if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
- if fwd := req.Header.Get(xForwardedFor); fwd == "" {
- pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
- } else {
- pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
- }
- } else {
- grpclog.Infof("invalid remote addr: %s", addr)
- }
- }
-
- if timeout != 0 {
- ctx, _ = context.WithTimeout(ctx, timeout)
- }
- if len(pairs) == 0 {
- return ctx, nil
- }
- md := metadata.Pairs(pairs...)
- for _, mda := range mux.metadataAnnotators {
- md = metadata.Join(md, mda(ctx, req))
- }
- return metadata.NewOutgoingContext(ctx, md), nil
-}
-
-// ServerMetadata consists of metadata sent from gRPC server.
-type ServerMetadata struct {
- HeaderMD metadata.MD
- TrailerMD metadata.MD
-}
-
-type serverMetadataKey struct{}
-
-// NewServerMetadataContext creates a new context with ServerMetadata
-func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
- return context.WithValue(ctx, serverMetadataKey{}, md)
-}
-
-// ServerMetadataFromContext returns the ServerMetadata in ctx
-func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
- md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
- return
-}
-
-func timeoutDecode(s string) (time.Duration, error) {
- size := len(s)
- if size < 2 {
- return 0, fmt.Errorf("timeout string is too short: %q", s)
- }
- d, ok := timeoutUnitToDuration(s[size-1])
- if !ok {
- return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
- }
- t, err := strconv.ParseInt(s[:size-1], 10, 64)
- if err != nil {
- return 0, err
- }
- return d * time.Duration(t), nil
-}
-
-func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
- switch u {
- case 'H':
- return time.Hour, true
- case 'M':
- return time.Minute, true
- case 'S':
- return time.Second, true
- case 'm':
- return time.Millisecond, true
- case 'u':
- return time.Microsecond, true
- case 'n':
- return time.Nanosecond, true
- default:
- }
- return
-}
-
-// isPermanentHTTPHeader checks whether hdr belongs to the list of
-// permenant request headers maintained by IANA.
-// http://www.iana.org/assignments/message-headers/message-headers.xml
-func isPermanentHTTPHeader(hdr string) bool {
- switch hdr {
- case
- "Accept",
- "Accept-Charset",
- "Accept-Language",
- "Accept-Ranges",
- "Authorization",
- "Cache-Control",
- "Content-Type",
- "Cookie",
- "Date",
- "Expect",
- "From",
- "Host",
- "If-Match",
- "If-Modified-Since",
- "If-None-Match",
- "If-Schedule-Tag-Match",
- "If-Unmodified-Since",
- "Max-Forwards",
- "Origin",
- "Pragma",
- "Referer",
- "User-Agent",
- "Via",
- "Warning":
- return true
- }
- return false
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
deleted file mode 100644
index a5b3bd6a7..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package runtime
-
-import (
- "encoding/base64"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/golang/protobuf/jsonpb"
- "github.com/golang/protobuf/ptypes/duration"
- "github.com/golang/protobuf/ptypes/timestamp"
- "github.com/golang/protobuf/ptypes/wrappers"
-)
-
-// String just returns the given string.
-// It is just for compatibility to other types.
-func String(val string) (string, error) {
- return val, nil
-}
-
-// StringSlice converts 'val' where individual strings are separated by
-// 'sep' into a string slice.
-func StringSlice(val, sep string) ([]string, error) {
- return strings.Split(val, sep), nil
-}
-
-// Bool converts the given string representation of a boolean value into bool.
-func Bool(val string) (bool, error) {
- return strconv.ParseBool(val)
-}
-
-// BoolSlice converts 'val' where individual booleans are separated by
-// 'sep' into a bool slice.
-func BoolSlice(val, sep string) ([]bool, error) {
- s := strings.Split(val, sep)
- values := make([]bool, len(s))
- for i, v := range s {
- value, err := Bool(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Float64 converts the given string representation into representation of a floating point number into float64.
-func Float64(val string) (float64, error) {
- return strconv.ParseFloat(val, 64)
-}
-
-// Float64Slice converts 'val' where individual floating point numbers are separated by
-// 'sep' into a float64 slice.
-func Float64Slice(val, sep string) ([]float64, error) {
- s := strings.Split(val, sep)
- values := make([]float64, len(s))
- for i, v := range s {
- value, err := Float64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Float32 converts the given string representation of a floating point number into float32.
-func Float32(val string) (float32, error) {
- f, err := strconv.ParseFloat(val, 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
-}
-
-// Float32Slice converts 'val' where individual floating point numbers are separated by
-// 'sep' into a float32 slice.
-func Float32Slice(val, sep string) ([]float32, error) {
- s := strings.Split(val, sep)
- values := make([]float32, len(s))
- for i, v := range s {
- value, err := Float32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Int64 converts the given string representation of an integer into int64.
-func Int64(val string) (int64, error) {
- return strconv.ParseInt(val, 0, 64)
-}
-
-// Int64Slice converts 'val' where individual integers are separated by
-// 'sep' into a int64 slice.
-func Int64Slice(val, sep string) ([]int64, error) {
- s := strings.Split(val, sep)
- values := make([]int64, len(s))
- for i, v := range s {
- value, err := Int64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Int32 converts the given string representation of an integer into int32.
-func Int32(val string) (int32, error) {
- i, err := strconv.ParseInt(val, 0, 32)
- if err != nil {
- return 0, err
- }
- return int32(i), nil
-}
-
-// Int32Slice converts 'val' where individual integers are separated by
-// 'sep' into a int32 slice.
-func Int32Slice(val, sep string) ([]int32, error) {
- s := strings.Split(val, sep)
- values := make([]int32, len(s))
- for i, v := range s {
- value, err := Int32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Uint64 converts the given string representation of an integer into uint64.
-func Uint64(val string) (uint64, error) {
- return strconv.ParseUint(val, 0, 64)
-}
-
-// Uint64Slice converts 'val' where individual integers are separated by
-// 'sep' into a uint64 slice.
-func Uint64Slice(val, sep string) ([]uint64, error) {
- s := strings.Split(val, sep)
- values := make([]uint64, len(s))
- for i, v := range s {
- value, err := Uint64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Uint32 converts the given string representation of an integer into uint32.
-func Uint32(val string) (uint32, error) {
- i, err := strconv.ParseUint(val, 0, 32)
- if err != nil {
- return 0, err
- }
- return uint32(i), nil
-}
-
-// Uint32Slice converts 'val' where individual integers are separated by
-// 'sep' into a uint32 slice.
-func Uint32Slice(val, sep string) ([]uint32, error) {
- s := strings.Split(val, sep)
- values := make([]uint32, len(s))
- for i, v := range s {
- value, err := Uint32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Bytes converts the given string representation of a byte sequence into a slice of bytes
-// A bytes sequence is encoded in URL-safe base64 without padding
-func Bytes(val string) ([]byte, error) {
- b, err := base64.StdEncoding.DecodeString(val)
- if err != nil {
- b, err = base64.URLEncoding.DecodeString(val)
- if err != nil {
- return nil, err
- }
- }
- return b, nil
-}
-
-// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
-// base64 without padding, are separated by 'sep' into a slice of bytes slices slice.
-func BytesSlice(val, sep string) ([][]byte, error) {
- s := strings.Split(val, sep)
- values := make([][]byte, len(s))
- for i, v := range s {
- value, err := Bytes(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
-func Timestamp(val string) (*timestamp.Timestamp, error) {
- var r *timestamp.Timestamp
- err := jsonpb.UnmarshalString(val, r)
- return r, err
-}
-
-// Duration converts the given string into a timestamp.Duration.
-func Duration(val string) (*duration.Duration, error) {
- var r *duration.Duration
- err := jsonpb.UnmarshalString(val, r)
- return r, err
-}
-
-// Enum converts the given string into an int32 that should be type casted into the
-// correct enum proto type.
-func Enum(val string, enumValMap map[string]int32) (int32, error) {
- e, ok := enumValMap[val]
- if ok {
- return e, nil
- }
-
- i, err := Int32(val)
- if err != nil {
- return 0, fmt.Errorf("%s is not valid", val)
- }
- for _, v := range enumValMap {
- if v == i {
- return i, nil
- }
- }
- return 0, fmt.Errorf("%s is not valid", val)
-}
-
-// EnumSlice converts 'val' where individual enums are separated by 'sep'
-// into a int32 slice. Each individual int32 should be type casted into the
-// correct enum proto type.
-func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
- s := strings.Split(val, sep)
- values := make([]int32, len(s))
- for i, v := range s {
- value, err := Enum(v, enumValMap)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-/*
- Support fot google.protobuf.wrappers on top of primitive types
-*/
-
-// StringValue well-known type support as wrapper around string type
-func StringValue(val string) (*wrappers.StringValue, error) {
- return &wrappers.StringValue{Value: val}, nil
-}
-
-// FloatValue well-known type support as wrapper around float32 type
-func FloatValue(val string) (*wrappers.FloatValue, error) {
- parsedVal, err := Float32(val)
- return &wrappers.FloatValue{Value: parsedVal}, err
-}
-
-// DoubleValue well-known type support as wrapper around float64 type
-func DoubleValue(val string) (*wrappers.DoubleValue, error) {
- parsedVal, err := Float64(val)
- return &wrappers.DoubleValue{Value: parsedVal}, err
-}
-
-// BoolValue well-known type support as wrapper around bool type
-func BoolValue(val string) (*wrappers.BoolValue, error) {
- parsedVal, err := Bool(val)
- return &wrappers.BoolValue{Value: parsedVal}, err
-}
-
-// Int32Value well-known type support as wrapper around int32 type
-func Int32Value(val string) (*wrappers.Int32Value, error) {
- parsedVal, err := Int32(val)
- return &wrappers.Int32Value{Value: parsedVal}, err
-}
-
-// UInt32Value well-known type support as wrapper around uint32 type
-func UInt32Value(val string) (*wrappers.UInt32Value, error) {
- parsedVal, err := Uint32(val)
- return &wrappers.UInt32Value{Value: parsedVal}, err
-}
-
-// Int64Value well-known type support as wrapper around int64 type
-func Int64Value(val string) (*wrappers.Int64Value, error) {
- parsedVal, err := Int64(val)
- return &wrappers.Int64Value{Value: parsedVal}, err
-}
-
-// UInt64Value well-known type support as wrapper around uint64 type
-func UInt64Value(val string) (*wrappers.UInt64Value, error) {
- parsedVal, err := Uint64(val)
- return &wrappers.UInt64Value{Value: parsedVal}, err
-}
-
-// BytesValue well-known type support as wrapper around bytes[] type
-func BytesValue(val string) (*wrappers.BytesValue, error) {
- parsedVal, err := Bytes(val)
- return &wrappers.BytesValue{Value: parsedVal}, err
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
deleted file mode 100644
index b6e5ddf7a..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
-Package runtime contains runtime helper functions used by
-servers which protoc-gen-grpc-gateway generates.
-*/
-package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
deleted file mode 100644
index 41d54ef91..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package runtime
-
-import (
- "context"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/any"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
-// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
-func HTTPStatusFromCode(code codes.Code) int {
- switch code {
- case codes.OK:
- return http.StatusOK
- case codes.Canceled:
- return http.StatusRequestTimeout
- case codes.Unknown:
- return http.StatusInternalServerError
- case codes.InvalidArgument:
- return http.StatusBadRequest
- case codes.DeadlineExceeded:
- return http.StatusGatewayTimeout
- case codes.NotFound:
- return http.StatusNotFound
- case codes.AlreadyExists:
- return http.StatusConflict
- case codes.PermissionDenied:
- return http.StatusForbidden
- case codes.Unauthenticated:
- return http.StatusUnauthorized
- case codes.ResourceExhausted:
- return http.StatusTooManyRequests
- case codes.FailedPrecondition:
- return http.StatusPreconditionFailed
- case codes.Aborted:
- return http.StatusConflict
- case codes.OutOfRange:
- return http.StatusBadRequest
- case codes.Unimplemented:
- return http.StatusNotImplemented
- case codes.Internal:
- return http.StatusInternalServerError
- case codes.Unavailable:
- return http.StatusServiceUnavailable
- case codes.DataLoss:
- return http.StatusInternalServerError
- }
-
- grpclog.Infof("Unknown gRPC error code: %v", code)
- return http.StatusInternalServerError
-}
-
-var (
- // HTTPError replies to the request with the error.
- // You can set a custom function to this variable to customize error format.
- HTTPError = DefaultHTTPError
- // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest
- OtherErrorHandler = DefaultOtherErrorHandler
-)
-
-type errorBody struct {
- Error string `protobuf:"bytes,1,name=error" json:"error"`
- // This is to make the error more compatible with users that expect errors to be Status objects:
- // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
- // It should be the exact same message as the Error field.
- Message string `protobuf:"bytes,1,name=message" json:"message"`
- Code int32 `protobuf:"varint,2,name=code" json:"code"`
- Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
-}
-
-// Make this also conform to proto.Message for builtin JSONPb Marshaler
-func (e *errorBody) Reset() { *e = errorBody{} }
-func (e *errorBody) String() string { return proto.CompactTextString(e) }
-func (*errorBody) ProtoMessage() {}
-
-// DefaultHTTPError is the default implementation of HTTPError.
-// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
-// If otherwise, it replies with http.StatusInternalServerError.
-//
-// The response body returned by this function is a JSON object,
-// which contains a member whose key is "error" and whose value is err.Error().
-func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
- const fallback = `{"error": "failed to marshal error message"}`
-
- s, ok := status.FromError(err)
- if !ok {
- s = status.New(codes.Unknown, err.Error())
- }
-
- w.Header().Del("Trailer")
-
- contentType := marshaler.ContentType()
- // Check marshaler on run time in order to keep backwards compatability
- // An interface param needs to be added to the ContentType() function on
- // the Marshal interface to be able to remove this check
- if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
- pb := s.Proto()
- contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
- }
- w.Header().Set("Content-Type", contentType)
-
- body := &errorBody{
- Error: s.Message(),
- Message: s.Message(),
- Code: int32(s.Code()),
- Details: s.Proto().GetDetails(),
- }
-
- buf, merr := marshaler.Marshal(body)
- if merr != nil {
- grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
- w.WriteHeader(http.StatusInternalServerError)
- if _, err := io.WriteString(w, fallback); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
- handleForwardResponseTrailerHeader(w, md)
- st := HTTPStatusFromCode(s.Code())
- w.WriteHeader(st)
- if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- handleForwardResponseTrailer(w, md)
-}
-
-// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
-// It simply writes a string representation of the given error into "w".
-func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
- http.Error(w, msg, code)
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
deleted file mode 100644
index e1cf7a914..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package runtime
-
-import (
- "encoding/json"
- "io"
- "strings"
-
- "github.com/golang/protobuf/protoc-gen-go/generator"
- "google.golang.org/genproto/protobuf/field_mask"
-)
-
-// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
-func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) {
- fm := &field_mask.FieldMask{}
- var root interface{}
- if err := json.NewDecoder(r).Decode(&root); err != nil {
- if err == io.EOF {
- return fm, nil
- }
- return nil, err
- }
-
- queue := []fieldMaskPathItem{{node: root}}
- for len(queue) > 0 {
- // dequeue an item
- item := queue[0]
- queue = queue[1:]
-
- if m, ok := item.node.(map[string]interface{}); ok {
- // if the item is an object, then enqueue all of its children
- for k, v := range m {
- queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v})
- }
- } else if len(item.path) > 0 {
- // otherwise, it's a leaf node so print its path
- fm.Paths = append(fm.Paths, strings.Join(item.path, "."))
- }
- }
-
- return fm, nil
-}
-
-// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
-type fieldMaskPathItem struct {
- // the list of prior fields leading up to node
- path []string
-
- // a generic decoded json object the current item to inspect for further path extraction
- node interface{}
-}
-
-// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic
-// that's used for naming protobuf fields in Go.
-func CamelCaseFieldMask(mask *field_mask.FieldMask) {
- if mask == nil || mask.Paths == nil {
- return
- }
-
- var newPaths []string
- for _, path := range mask.Paths {
- lowerCasedParts := strings.Split(path, ".")
- var camelCasedParts []string
- for _, part := range lowerCasedParts {
- camelCasedParts = append(camelCasedParts, generator.CamelCase(part))
- }
- newPaths = append(newPaths, strings.Join(camelCasedParts, "."))
- }
-
- mask.Paths = newPaths
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
deleted file mode 100644
index 1fc63f7f5..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package runtime
-
-import (
- "fmt"
- "io"
- "net/http"
- "net/textproto"
-
- "context"
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/any"
- "github.com/grpc-ecosystem/grpc-gateway/internal"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-// ForwardResponseStream forwards the stream from gRPC server to REST client.
-func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
- f, ok := w.(http.Flusher)
- if !ok {
- grpclog.Infof("Flush not supported in %T", w)
- http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- http.Error(w, "unexpected error", http.StatusInternalServerError)
- return
- }
- handleForwardResponseServerMetadata(w, mux, md)
-
- w.Header().Set("Transfer-Encoding", "chunked")
- w.Header().Set("Content-Type", marshaler.ContentType())
- if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
-
- var delimiter []byte
- if d, ok := marshaler.(Delimited); ok {
- delimiter = d.Delimiter()
- } else {
- delimiter = []byte("\n")
- }
-
- var wroteHeader bool
- for {
- resp, err := recv()
- if err == io.EOF {
- return
- }
- if err != nil {
- handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
- return
- }
- if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
- handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
- return
- }
-
- buf, err := marshaler.Marshal(streamChunk(resp, nil))
- if err != nil {
- grpclog.Infof("Failed to marshal response chunk: %v", err)
- handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
- return
- }
- if _, err = w.Write(buf); err != nil {
- grpclog.Infof("Failed to send response chunk: %v", err)
- return
- }
- wroteHeader = true
- if _, err = w.Write(delimiter); err != nil {
- grpclog.Infof("Failed to send delimiter chunk: %v", err)
- return
- }
- f.Flush()
- }
-}
-
-func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
- for k, vs := range md.HeaderMD {
- if h, ok := mux.outgoingHeaderMatcher(k); ok {
- for _, v := range vs {
- w.Header().Add(h, v)
- }
- }
- }
-}
-
-func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
- for k := range md.TrailerMD {
- tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
- w.Header().Add("Trailer", tKey)
- }
-}
-
-func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
- for k, vs := range md.TrailerMD {
- tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
- for _, v := range vs {
- w.Header().Add(tKey, v)
- }
- }
-}
-
-// responseBody interface contains method for getting field for marshaling to the response body
-// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule`
-type responseBody interface {
- XXX_ResponseBody() interface{}
-}
-
-// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
-func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
- handleForwardResponseTrailerHeader(w, md)
-
- contentType := marshaler.ContentType()
- // Check marshaler on run time in order to keep backwards compatability
- // An interface param needs to be added to the ContentType() function on
- // the Marshal interface to be able to remove this check
- if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
- contentType = httpBodyMarshaler.ContentTypeFromMessage(resp)
- }
- w.Header().Set("Content-Type", contentType)
-
- if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
- var buf []byte
- var err error
- if rb, ok := resp.(responseBody); ok {
- buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
- } else {
- buf, err = marshaler.Marshal(resp)
- }
- if err != nil {
- grpclog.Infof("Marshal error: %v", err)
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
-
- if _, err = w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- handleForwardResponseTrailer(w, md)
-}
-
-func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
- if len(opts) == 0 {
- return nil
- }
- for _, opt := range opts {
- if err := opt(ctx, w, resp); err != nil {
- grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
- return err
- }
- }
- return nil
-}
-
-func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) {
- buf, merr := marshaler.Marshal(streamChunk(nil, err))
- if merr != nil {
- grpclog.Infof("Failed to marshal an error: %v", merr)
- return
- }
- if !wroteHeader {
- s, ok := status.FromError(err)
- if !ok {
- s = status.New(codes.Unknown, err.Error())
- }
- w.WriteHeader(HTTPStatusFromCode(s.Code()))
- }
- if _, werr := w.Write(buf); werr != nil {
- grpclog.Infof("Failed to notify error to client: %v", werr)
- return
- }
-}
-
-func streamChunk(result proto.Message, err error) map[string]proto.Message {
- if err != nil {
- grpcCode := codes.Unknown
- grpcMessage := err.Error()
- var grpcDetails []*any.Any
- if s, ok := status.FromError(err); ok {
- grpcCode = s.Code()
- grpcMessage = s.Message()
- grpcDetails = s.Proto().GetDetails()
- }
- httpCode := HTTPStatusFromCode(grpcCode)
- return map[string]proto.Message{
- "error": &internal.StreamError{
- GrpcCode: int32(grpcCode),
- HttpCode: int32(httpCode),
- Message: grpcMessage,
- HttpStatus: http.StatusText(httpCode),
- Details: grpcDetails,
- },
- }
- }
- if result == nil {
- return streamChunk(nil, fmt.Errorf("empty response"))
- }
- return map[string]proto.Message{"result": result}
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
deleted file mode 100644
index f55285b5d..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package runtime
-
-import (
- "google.golang.org/genproto/googleapis/api/httpbody"
-)
-
-// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler
-func SetHTTPBodyMarshaler(serveMux *ServeMux) {
- serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
- Marshaler: &JSONPb{OrigName: true},
- }
-}
-
-// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
-// google.api.HttpBody message as the full response body if it is
-// the actual message used as the response. If not, then this will
-// simply fallback to the Marshaler specified as its default Marshaler.
-type HTTPBodyMarshaler struct {
- Marshaler
-}
-
-// ContentType implementation to keep backwards compatability with marshal interface
-func (h *HTTPBodyMarshaler) ContentType() string {
- return h.ContentTypeFromMessage(nil)
-}
-
-// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns
-// its specified content type otherwise fall back to the default Marshaler.
-func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
- if httpBody, ok := v.(*httpbody.HttpBody); ok {
- return httpBody.GetContentType()
- }
- return h.Marshaler.ContentType()
-}
-
-// Marshal marshals "v" by returning the body bytes if v is a
-// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
-func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
- if httpBody, ok := v.(*httpbody.HttpBody); ok {
- return httpBody.Data, nil
- }
- return h.Marshaler.Marshal(v)
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
deleted file mode 100644
index f9d3a585a..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package runtime
-
-import (
- "encoding/json"
- "io"
-)
-
-// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
-// with the standard "encoding/json" package of Golang.
-// Although it is generally faster for simple proto messages than JSONPb,
-// it does not support advanced features of protobuf, e.g. map, oneof, ....
-//
-// The NewEncoder and NewDecoder types return *json.Encoder and
-// *json.Decoder respectively.
-type JSONBuiltin struct{}
-
-// ContentType always Returns "application/json".
-func (*JSONBuiltin) ContentType() string {
- return "application/json"
-}
-
-// Marshal marshals "v" into JSON
-func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
- return json.Marshal(v)
-}
-
-// Unmarshal unmarshals JSON data into "v".
-func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
- return json.Unmarshal(data, v)
-}
-
-// NewDecoder returns a Decoder which reads JSON stream from "r".
-func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
- return json.NewDecoder(r)
-}
-
-// NewEncoder returns an Encoder which writes JSON stream into "w".
-func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
- return json.NewEncoder(w)
-}
-
-// Delimiter for newline encoded JSON streams.
-func (j *JSONBuiltin) Delimiter() []byte {
- return []byte("\n")
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
deleted file mode 100644
index 3530dddd0..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package runtime
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "reflect"
-
- "github.com/golang/protobuf/jsonpb"
- "github.com/golang/protobuf/proto"
-)
-
-// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
-// with the "github.com/golang/protobuf/jsonpb".
-// It supports fully functionality of protobuf unlike JSONBuiltin.
-//
-// The NewDecoder method returns a DecoderWrapper, so the underlying
-// *json.Decoder methods can be used.
-type JSONPb jsonpb.Marshaler
-
-// ContentType always returns "application/json".
-func (*JSONPb) ContentType() string {
- return "application/json"
-}
-
-// Marshal marshals "v" into JSON.
-func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
- if _, ok := v.(proto.Message); !ok {
- return j.marshalNonProtoField(v)
- }
-
- var buf bytes.Buffer
- if err := j.marshalTo(&buf, v); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
- p, ok := v.(proto.Message)
- if !ok {
- buf, err := j.marshalNonProtoField(v)
- if err != nil {
- return err
- }
- _, err = w.Write(buf)
- return err
- }
- return (*jsonpb.Marshaler)(j).Marshal(w, p)
-}
-
-var (
- // protoMessageType is stored to prevent constant lookup of the same type at runtime.
- protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
-)
-
-// marshalNonProto marshals a non-message field of a protobuf message.
-// This function does not correctly marshals arbitrary data structure into JSON,
-// but it is only capable of marshaling non-message field values of protobuf,
-// i.e. primitive types, enums; pointers to primitives or enums; maps from
-// integer/string types to primitives/enums/pointers to messages.
-func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
- if v == nil {
- return []byte("null"), nil
- }
- rv := reflect.ValueOf(v)
- for rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- return []byte("null"), nil
- }
- rv = rv.Elem()
- }
-
- if rv.Kind() == reflect.Slice {
- if rv.IsNil() {
- if j.EmitDefaults {
- return []byte("[]"), nil
- }
- return []byte("null"), nil
- }
-
- if rv.Type().Elem().Implements(protoMessageType) {
- var buf bytes.Buffer
- err := buf.WriteByte('[')
- if err != nil {
- return nil, err
- }
- for i := 0; i < rv.Len(); i++ {
- if i != 0 {
- err = buf.WriteByte(',')
- if err != nil {
- return nil, err
- }
- }
- if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
- return nil, err
- }
- }
- err = buf.WriteByte(']')
- if err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
- }
- }
-
- if rv.Kind() == reflect.Map {
- m := make(map[string]*json.RawMessage)
- for _, k := range rv.MapKeys() {
- buf, err := j.Marshal(rv.MapIndex(k).Interface())
- if err != nil {
- return nil, err
- }
- m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
- }
- if j.Indent != "" {
- return json.MarshalIndent(m, "", j.Indent)
- }
- return json.Marshal(m)
- }
- if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
- return json.Marshal(enum.String())
- }
- return json.Marshal(rv.Interface())
-}
-
-// Unmarshal unmarshals JSON "data" into "v"
-func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
- return unmarshalJSONPb(data, v)
-}
-
-// NewDecoder returns a Decoder which reads JSON stream from "r".
-func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
- d := json.NewDecoder(r)
- return DecoderWrapper{Decoder: d}
-}
-
-// DecoderWrapper is a wrapper around a *json.Decoder that adds
-// support for protos to the Decode method.
-type DecoderWrapper struct {
- *json.Decoder
-}
-
-// Decode wraps the embedded decoder's Decode method to support
-// protos using a jsonpb.Unmarshaler.
-func (d DecoderWrapper) Decode(v interface{}) error {
- return decodeJSONPb(d.Decoder, v)
-}
-
-// NewEncoder returns an Encoder which writes JSON stream into "w".
-func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
- return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) })
-}
-
-func unmarshalJSONPb(data []byte, v interface{}) error {
- d := json.NewDecoder(bytes.NewReader(data))
- return decodeJSONPb(d, v)
-}
-
-func decodeJSONPb(d *json.Decoder, v interface{}) error {
- p, ok := v.(proto.Message)
- if !ok {
- return decodeNonProtoField(d, v)
- }
- unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
- return unmarshaler.UnmarshalNext(d, p)
-}
-
-func decodeNonProtoField(d *json.Decoder, v interface{}) error {
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr {
- return fmt.Errorf("%T is not a pointer", v)
- }
- for rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- rv.Set(reflect.New(rv.Type().Elem()))
- }
- if rv.Type().ConvertibleTo(typeProtoMessage) {
- unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
- return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
- }
- rv = rv.Elem()
- }
- if rv.Kind() == reflect.Map {
- if rv.IsNil() {
- rv.Set(reflect.MakeMap(rv.Type()))
- }
- conv, ok := convFromType[rv.Type().Key().Kind()]
- if !ok {
- return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
- }
-
- m := make(map[string]*json.RawMessage)
- if err := d.Decode(&m); err != nil {
- return err
- }
- for k, v := range m {
- result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- bk := result[0]
- bv := reflect.New(rv.Type().Elem())
- if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
- return err
- }
- rv.SetMapIndex(bk, bv.Elem())
- }
- return nil
- }
- if _, ok := rv.Interface().(protoEnum); ok {
- var repr interface{}
- if err := d.Decode(&repr); err != nil {
- return err
- }
- switch repr.(type) {
- case string:
- // TODO(yugui) Should use proto.StructProperties?
- return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
- case float64:
- rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
- return nil
- default:
- return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
- }
- }
- return d.Decode(v)
-}
-
-type protoEnum interface {
- fmt.Stringer
- EnumDescriptor() ([]byte, []int)
-}
-
-var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
-
-// Delimiter for newline encoded JSON streams.
-func (j *JSONPb) Delimiter() []byte {
- return []byte("\n")
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
deleted file mode 100644
index f65d1a267..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package runtime
-
-import (
- "io"
-
- "errors"
- "github.com/golang/protobuf/proto"
- "io/ioutil"
-)
-
-// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes
-type ProtoMarshaller struct{}
-
-// ContentType always returns "application/octet-stream".
-func (*ProtoMarshaller) ContentType() string {
- return "application/octet-stream"
-}
-
-// Marshal marshals "value" into Proto
-func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
- message, ok := value.(proto.Message)
- if !ok {
- return nil, errors.New("unable to marshal non proto field")
- }
- return proto.Marshal(message)
-}
-
-// Unmarshal unmarshals proto "data" into "value"
-func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
- message, ok := value.(proto.Message)
- if !ok {
- return errors.New("unable to unmarshal non proto field")
- }
- return proto.Unmarshal(data, message)
-}
-
-// NewDecoder returns a Decoder which reads proto stream from "reader".
-func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
- return DecoderFunc(func(value interface{}) error {
- buffer, err := ioutil.ReadAll(reader)
- if err != nil {
- return err
- }
- return marshaller.Unmarshal(buffer, value)
- })
-}
-
-// NewEncoder returns an Encoder which writes proto stream into "writer".
-func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
- return EncoderFunc(func(value interface{}) error {
- buffer, err := marshaller.Marshal(value)
- if err != nil {
- return err
- }
- _, err = writer.Write(buffer)
- if err != nil {
- return err
- }
-
- return nil
- })
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
deleted file mode 100644
index 98fe6e88a..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package runtime
-
-import (
- "io"
-)
-
-// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
-type Marshaler interface {
- // Marshal marshals "v" into byte sequence.
- Marshal(v interface{}) ([]byte, error)
- // Unmarshal unmarshals "data" into "v".
- // "v" must be a pointer value.
- Unmarshal(data []byte, v interface{}) error
- // NewDecoder returns a Decoder which reads byte sequence from "r".
- NewDecoder(r io.Reader) Decoder
- // NewEncoder returns an Encoder which writes bytes sequence into "w".
- NewEncoder(w io.Writer) Encoder
- // ContentType returns the Content-Type which this marshaler is responsible for.
- ContentType() string
-}
-
-// Decoder decodes a byte sequence
-type Decoder interface {
- Decode(v interface{}) error
-}
-
-// Encoder encodes gRPC payloads / fields into byte sequence.
-type Encoder interface {
- Encode(v interface{}) error
-}
-
-// DecoderFunc adapts an decoder function into Decoder.
-type DecoderFunc func(v interface{}) error
-
-// Decode delegates invocations to the underlying function itself.
-func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
-
-// EncoderFunc adapts an encoder function into Encoder
-type EncoderFunc func(v interface{}) error
-
-// Encode delegates invocations to the underlying function itself.
-func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
-
-// Delimited defines the streaming delimiter.
-type Delimited interface {
- // Delimiter returns the record seperator for the stream.
- Delimiter() []byte
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
deleted file mode 100644
index 5cc53ae4f..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package runtime
-
-import (
- "errors"
- "net/http"
-)
-
-// MIMEWildcard is the fallback MIME type used for requests which do not match
-// a registered MIME type.
-const MIMEWildcard = "*"
-
-var (
- acceptHeader = http.CanonicalHeaderKey("Accept")
- contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
-
- defaultMarshaler = &JSONPb{OrigName: true}
-)
-
-// MarshalerForRequest returns the inbound/outbound marshalers for this request.
-// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
-// If it isn't set (or the request Content-Type is empty), checks for "*".
-// If there are multiple Content-Type headers set, choose the first one that it can
-// exactly match in the registry.
-// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
-func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
- for _, acceptVal := range r.Header[acceptHeader] {
- if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
- outbound = m
- break
- }
- }
-
- for _, contentTypeVal := range r.Header[contentTypeHeader] {
- if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
- inbound = m
- break
- }
- }
-
- if inbound == nil {
- inbound = mux.marshalers.mimeMap[MIMEWildcard]
- }
- if outbound == nil {
- outbound = inbound
- }
-
- return inbound, outbound
-}
-
-// marshalerRegistry is a mapping from MIME types to Marshalers.
-type marshalerRegistry struct {
- mimeMap map[string]Marshaler
-}
-
-// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
-// MIME type).
-func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
- if len(mime) == 0 {
- return errors.New("empty MIME type")
- }
-
- m.mimeMap[mime] = marshaler
-
- return nil
-}
-
-// makeMarshalerMIMERegistry returns a new registry of marshalers.
-// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
-//
-// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
-// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
-// with a "application/json" Content-Type.
-// "*" can be used to match any Content-Type.
-// This can be attached to a ServerMux with the marshaler option.
-func makeMarshalerMIMERegistry() marshalerRegistry {
- return marshalerRegistry{
- mimeMap: map[string]Marshaler{
- MIMEWildcard: defaultMarshaler,
- },
- }
-}
-
-// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
-// Marshalers to a MIME type in mux.
-func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
- return func(mux *ServeMux) {
- if err := mux.marshalers.add(mime, marshaler); err != nil {
- panic(err)
- }
- }
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
deleted file mode 100644
index ec81e55b5..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package runtime
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/textproto"
- "strings"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// A HandlerFunc handles a specific pair of path pattern and HTTP method.
-type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
-
-// ServeMux is a request multiplexer for grpc-gateway.
-// It matches http requests to patterns and invokes the corresponding handler.
-type ServeMux struct {
- // handlers maps HTTP method to a list of handlers.
- handlers map[string][]handler
- forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
- marshalers marshalerRegistry
- incomingHeaderMatcher HeaderMatcherFunc
- outgoingHeaderMatcher HeaderMatcherFunc
- metadataAnnotators []func(context.Context, *http.Request) metadata.MD
- protoErrorHandler ProtoErrorHandlerFunc
- disablePathLengthFallback bool
-}
-
-// ServeMuxOption is an option that can be given to a ServeMux on construction.
-type ServeMuxOption func(*ServeMux)
-
-// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
-//
-// forwardResponseOption is an option that will be called on the relevant context.Context,
-// http.ResponseWriter, and proto.Message before every forwarded response.
-//
-// The message may be nil in the case where just a header is being sent.
-func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
- }
-}
-
-// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
-type HeaderMatcherFunc func(string) (string, bool)
-
-// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
-// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
-// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
-func DefaultHeaderMatcher(key string) (string, bool) {
- key = textproto.CanonicalMIMEHeaderKey(key)
- if isPermanentHTTPHeader(key) {
- return MetadataPrefix + key, true
- } else if strings.HasPrefix(key, MetadataHeaderPrefix) {
- return key[len(MetadataHeaderPrefix):], true
- }
- return "", false
-}
-
-// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
-//
-// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
-// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
-func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
- return func(mux *ServeMux) {
- mux.incomingHeaderMatcher = fn
- }
-}
-
-// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
-//
-// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
-// passed to http response returned from gateway. To transform the header before passing to response,
-// matcher should return modified header.
-func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
- return func(mux *ServeMux) {
- mux.outgoingHeaderMatcher = fn
- }
-}
-
-// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
-//
-// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
-// is reading token from cookie and adding it in gRPC context.
-func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
- }
-}
-
-// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context.
-//
-// This can be used to handle an error as general proto message defined by gRPC.
-// The response including body and status is not backward compatible with the default error handler.
-// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization.
-func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.protoErrorHandler = fn
- }
-}
-
-// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback.
-func WithDisablePathLengthFallback() ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.disablePathLengthFallback = true
- }
-}
-
-// NewServeMux returns a new ServeMux whose internal mapping is empty.
-func NewServeMux(opts ...ServeMuxOption) *ServeMux {
- serveMux := &ServeMux{
- handlers: make(map[string][]handler),
- forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
- marshalers: makeMarshalerMIMERegistry(),
- }
-
- for _, opt := range opts {
- opt(serveMux)
- }
-
- if serveMux.protoErrorHandler != nil {
- HTTPError = serveMux.protoErrorHandler
- // OtherErrorHandler is no longer used when protoErrorHandler is set.
- // Overwritten by a special error handler to return Unknown.
- OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) {
- ctx := context.Background()
- _, outboundMarshaler := MarshalerForRequest(serveMux, r)
- sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler")
- serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr)
- }
- }
-
- if serveMux.incomingHeaderMatcher == nil {
- serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
- }
-
- if serveMux.outgoingHeaderMatcher == nil {
- serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
- return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
- }
- }
-
- return serveMux
-}
-
-// Handle associates "h" to the pair of HTTP method and path pattern.
-func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
- s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
-}
-
-// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
-func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
-
- path := r.URL.Path
- if !strings.HasPrefix(path, "/") {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
- }
- return
- }
-
- components := strings.Split(path[1:], "/")
- l := len(components)
- var verb string
- if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
- }
- return
- } else if idx > 0 {
- c := components[l-1]
- components[l-1], verb = c[:idx], c[idx+1:]
- }
-
- if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
- r.Method = strings.ToUpper(override)
- if err := r.ParseForm(); err != nil {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, err.Error())
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
- }
- return
- }
- }
- for _, h := range s.handlers[r.Method] {
- pathParams, err := h.pat.Match(components, verb)
- if err != nil {
- continue
- }
- h.h(w, r, pathParams)
- return
- }
-
- // lookup other methods to handle fallback from GET to POST and
- // to determine if it is MethodNotAllowed or NotFound.
- for m, handlers := range s.handlers {
- if m == r.Method {
- continue
- }
- for _, h := range handlers {
- pathParams, err := h.pat.Match(components, verb)
- if err != nil {
- continue
- }
- // X-HTTP-Method-Override is optional. Always allow fallback to POST.
- if s.isPathLengthFallback(r) {
- if err := r.ParseForm(); err != nil {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, err.Error())
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
- }
- return
- }
- h.h(w, r, pathParams)
- return
- }
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed))
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
- }
- return
- }
- }
-
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
- }
-}
-
-// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
-func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
- return s.forwardResponseOptions
-}
-
-func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
- return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
-}
-
-type handler struct {
- pat Pattern
- h HandlerFunc
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
deleted file mode 100644
index f16a84ad3..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package runtime
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- // ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
- ErrNotMatch = errors.New("not match to the path pattern")
- // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
- ErrInvalidPattern = errors.New("invalid pattern")
-)
-
-type op struct {
- code utilities.OpCode
- operand int
-}
-
-// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
-type Pattern struct {
- // ops is a list of operations
- ops []op
- // pool is a constant pool indexed by the operands or vars.
- pool []string
- // vars is a list of variables names to be bound by this pattern
- vars []string
- // stacksize is the max depth of the stack
- stacksize int
- // tailLen is the length of the fixed-size segments after a deep wildcard
- tailLen int
- // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
- verb string
-}
-
-// NewPattern returns a new Pattern from the given definition values.
-// "ops" is a sequence of op codes. "pool" is a constant pool.
-// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
-// "version" must be 1 for now.
-// It returns an error if the given definition is invalid.
-func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
- if version != 1 {
- grpclog.Infof("unsupported version: %d", version)
- return Pattern{}, ErrInvalidPattern
- }
-
- l := len(ops)
- if l%2 != 0 {
- grpclog.Infof("odd number of ops codes: %d", l)
- return Pattern{}, ErrInvalidPattern
- }
-
- var (
- typedOps []op
- stack, maxstack int
- tailLen int
- pushMSeen bool
- vars []string
- )
- for i := 0; i < l; i += 2 {
- op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush:
- if pushMSeen {
- tailLen++
- }
- stack++
- case utilities.OpPushM:
- if pushMSeen {
- grpclog.Infof("pushM appears twice")
- return Pattern{}, ErrInvalidPattern
- }
- pushMSeen = true
- stack++
- case utilities.OpLitPush:
- if op.operand < 0 || len(pool) <= op.operand {
- grpclog.Infof("negative literal index: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- if pushMSeen {
- tailLen++
- }
- stack++
- case utilities.OpConcatN:
- if op.operand <= 0 {
- grpclog.Infof("negative concat size: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- stack -= op.operand
- if stack < 0 {
- grpclog.Print("stack underflow")
- return Pattern{}, ErrInvalidPattern
- }
- stack++
- case utilities.OpCapture:
- if op.operand < 0 || len(pool) <= op.operand {
- grpclog.Infof("variable name index out of bound: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- v := pool[op.operand]
- op.operand = len(vars)
- vars = append(vars, v)
- stack--
- if stack < 0 {
- grpclog.Infof("stack underflow")
- return Pattern{}, ErrInvalidPattern
- }
- default:
- grpclog.Infof("invalid opcode: %d", op.code)
- return Pattern{}, ErrInvalidPattern
- }
-
- if maxstack < stack {
- maxstack = stack
- }
- typedOps = append(typedOps, op)
- }
- return Pattern{
- ops: typedOps,
- pool: pool,
- vars: vars,
- stacksize: maxstack,
- tailLen: tailLen,
- verb: verb,
- }, nil
-}
-
-// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
-func MustPattern(p Pattern, err error) Pattern {
- if err != nil {
- grpclog.Fatalf("Pattern initialization failed: %v", err)
- }
- return p
-}
-
-// Match examines components if it matches to the Pattern.
-// If it matches, the function returns a mapping from field paths to their captured values.
-// If otherwise, the function returns an error.
-func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
- if p.verb != verb {
- return nil, ErrNotMatch
- }
-
- var pos int
- stack := make([]string, 0, p.stacksize)
- captured := make([]string, len(p.vars))
- l := len(components)
- for _, op := range p.ops {
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush, utilities.OpLitPush:
- if pos >= l {
- return nil, ErrNotMatch
- }
- c := components[pos]
- if op.code == utilities.OpLitPush {
- if lit := p.pool[op.operand]; c != lit {
- return nil, ErrNotMatch
- }
- }
- stack = append(stack, c)
- pos++
- case utilities.OpPushM:
- end := len(components)
- if end < pos+p.tailLen {
- return nil, ErrNotMatch
- }
- end -= p.tailLen
- stack = append(stack, strings.Join(components[pos:end], "/"))
- pos = end
- case utilities.OpConcatN:
- n := op.operand
- l := len(stack) - n
- stack = append(stack[:l], strings.Join(stack[l:], "/"))
- case utilities.OpCapture:
- n := len(stack) - 1
- captured[op.operand] = stack[n]
- stack = stack[:n]
- }
- }
- if pos < l {
- return nil, ErrNotMatch
- }
- bindings := make(map[string]string)
- for i, val := range captured {
- bindings[p.vars[i]] = val
- }
- return bindings, nil
-}
-
-// Verb returns the verb part of the Pattern.
-func (p Pattern) Verb() string { return p.verb }
-
-func (p Pattern) String() string {
- var stack []string
- for _, op := range p.ops {
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush:
- stack = append(stack, "*")
- case utilities.OpLitPush:
- stack = append(stack, p.pool[op.operand])
- case utilities.OpPushM:
- stack = append(stack, "**")
- case utilities.OpConcatN:
- n := op.operand
- l := len(stack) - n
- stack = append(stack[:l], strings.Join(stack[l:], "/"))
- case utilities.OpCapture:
- n := len(stack) - 1
- stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
- }
- }
- segs := strings.Join(stack, "/")
- if p.verb != "" {
- return fmt.Sprintf("/%s:%s", segs, p.verb)
- }
- return "/" + segs
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
deleted file mode 100644
index a3151e2a5..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package runtime
-
-import (
- "github.com/golang/protobuf/proto"
-)
-
-// StringP returns a pointer to a string whose pointee is same as the given string value.
-func StringP(val string) (*string, error) {
- return proto.String(val), nil
-}
-
-// BoolP parses the given string representation of a boolean value,
-// and returns a pointer to a bool whose value is same as the parsed value.
-func BoolP(val string) (*bool, error) {
- b, err := Bool(val)
- if err != nil {
- return nil, err
- }
- return proto.Bool(b), nil
-}
-
-// Float64P parses the given string representation of a floating point number,
-// and returns a pointer to a float64 whose value is same as the parsed number.
-func Float64P(val string) (*float64, error) {
- f, err := Float64(val)
- if err != nil {
- return nil, err
- }
- return proto.Float64(f), nil
-}
-
-// Float32P parses the given string representation of a floating point number,
-// and returns a pointer to a float32 whose value is same as the parsed number.
-func Float32P(val string) (*float32, error) {
- f, err := Float32(val)
- if err != nil {
- return nil, err
- }
- return proto.Float32(f), nil
-}
-
-// Int64P parses the given string representation of an integer
-// and returns a pointer to a int64 whose value is same as the parsed integer.
-func Int64P(val string) (*int64, error) {
- i, err := Int64(val)
- if err != nil {
- return nil, err
- }
- return proto.Int64(i), nil
-}
-
-// Int32P parses the given string representation of an integer
-// and returns a pointer to a int32 whose value is same as the parsed integer.
-func Int32P(val string) (*int32, error) {
- i, err := Int32(val)
- if err != nil {
- return nil, err
- }
- return proto.Int32(i), err
-}
-
-// Uint64P parses the given string representation of an integer
-// and returns a pointer to a uint64 whose value is same as the parsed integer.
-func Uint64P(val string) (*uint64, error) {
- i, err := Uint64(val)
- if err != nil {
- return nil, err
- }
- return proto.Uint64(i), err
-}
-
-// Uint32P parses the given string representation of an integer
-// and returns a pointer to a uint32 whose value is same as the parsed integer.
-func Uint32P(val string) (*uint32, error) {
- i, err := Uint32(val)
- if err != nil {
- return nil, err
- }
- return proto.Uint32(i), err
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
deleted file mode 100644
index b7fa32e45..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package runtime
-
-import (
- "io"
- "net/http"
-
- "context"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
-type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
-
-var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
-
-// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
-// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
-// If otherwise, it replies with http.StatusInternalServerError.
-//
-// The response body returned by this function is a Status message marshaled by a Marshaler.
-//
-// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
- // return Internal when Marshal failed
- const fallback = `{"code": 13, "message": "failed to marshal error message"}`
-
- s, ok := status.FromError(err)
- if !ok {
- s = status.New(codes.Unknown, err.Error())
- }
-
- w.Header().Del("Trailer")
-
- contentType := marshaler.ContentType()
- // Check marshaler on run time in order to keep backwards compatability
- // An interface param needs to be added to the ContentType() function on
- // the Marshal interface to be able to remove this check
- if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
- pb := s.Proto()
- contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
- }
- w.Header().Set("Content-Type", contentType)
-
- buf, merr := marshaler.Marshal(s.Proto())
- if merr != nil {
- grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
- w.WriteHeader(http.StatusInternalServerError)
- if _, err := io.WriteString(w, fallback); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
- handleForwardResponseTrailerHeader(w, md)
- st := HTTPStatusFromCode(s.Code())
- w.WriteHeader(st)
- if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- handleForwardResponseTrailer(w, md)
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
deleted file mode 100644
index bb9359f17..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
+++ /dev/null
@@ -1,392 +0,0 @@
-package runtime
-
-import (
- "encoding/base64"
- "fmt"
- "net/url"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc/grpclog"
-)
-
-// PopulateQueryParameters populates "values" into "msg".
-// A value is ignored if its key starts with one of the elements in "filter".
-func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
- for key, values := range values {
- re, err := regexp.Compile("^(.*)\\[(.*)\\]$")
- if err != nil {
- return err
- }
- match := re.FindStringSubmatch(key)
- if len(match) == 3 {
- key = match[1]
- values = append([]string{match[2]}, values...)
- }
- fieldPath := strings.Split(key, ".")
- if filter.HasCommonPrefix(fieldPath) {
- continue
- }
- if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
- return err
- }
- }
- return nil
-}
-
-// PopulateFieldFromPath sets a value in a nested Protobuf structure.
-// It instantiates missing protobuf fields as it goes.
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
- fieldPath := strings.Split(fieldPathString, ".")
- return populateFieldValueFromPath(msg, fieldPath, []string{value})
-}
-
-func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
- m := reflect.ValueOf(msg)
- if m.Kind() != reflect.Ptr {
- return fmt.Errorf("unexpected type %T: %v", msg, msg)
- }
- var props *proto.Properties
- m = m.Elem()
- for i, fieldName := range fieldPath {
- isLast := i == len(fieldPath)-1
- if !isLast && m.Kind() != reflect.Struct {
- return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
- }
- var f reflect.Value
- var err error
- f, props, err = fieldByProtoName(m, fieldName)
- if err != nil {
- return err
- } else if !f.IsValid() {
- grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
- return nil
- }
-
- switch f.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
- if !isLast {
- return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
- }
- m = f
- case reflect.Slice:
- if !isLast {
- return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
- }
- // Handle []byte
- if f.Type().Elem().Kind() == reflect.Uint8 {
- m = f
- break
- }
- return populateRepeatedField(f, values, props)
- case reflect.Ptr:
- if f.IsNil() {
- m = reflect.New(f.Type().Elem())
- f.Set(m.Convert(f.Type()))
- }
- m = f.Elem()
- continue
- case reflect.Struct:
- m = f
- continue
- case reflect.Map:
- if !isLast {
- return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
- }
- return populateMapField(f, values, props)
- default:
- return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
- }
- }
- switch len(values) {
- case 0:
- return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
- case 1:
- default:
- grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
- }
- return populateField(m, values[0], props)
-}
-
-// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
-// "m" must be a struct value. It returns zero reflect.Value if no such field found.
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
- props := proto.GetProperties(m.Type())
-
- // look up field name in oneof map
- if op, ok := props.OneofTypes[name]; ok {
- v := reflect.New(op.Type.Elem())
- field := m.Field(op.Field)
- if !field.IsNil() {
- return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
- }
- field.Set(v)
- return v.Elem().Field(0), op.Prop, nil
- }
-
- for _, p := range props.Prop {
- if p.OrigName == name {
- return m.FieldByName(p.Name), p, nil
- }
- if p.JSONName == name {
- return m.FieldByName(p.Name), p, nil
- }
- }
- return reflect.Value{}, nil, nil
-}
-
-func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
- if len(values) != 2 {
- return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
- }
-
- key, value := values[0], values[1]
- keyType := f.Type().Key()
- valueType := f.Type().Elem()
- if f.IsNil() {
- f.Set(reflect.MakeMap(f.Type()))
- }
-
- keyConv, ok := convFromType[keyType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
- }
- valueConv, ok := convFromType[valueType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
- }
-
- keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
- if err := keyV[1].Interface(); err != nil {
- return err.(error)
- }
- valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
- if err := valueV[1].Interface(); err != nil {
- return err.(error)
- }
-
- f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
-
- return nil
-}
-
-func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
- elemType := f.Type().Elem()
-
- // is the destination field a slice of an enumeration type?
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
- return populateFieldEnumRepeated(f, values, enumValMap)
- }
-
- conv, ok := convFromType[elemType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported field type %s", elemType)
- }
- f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
- for i, v := range values {
- result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
- }
- return nil
-}
-
-func populateField(f reflect.Value, value string, props *proto.Properties) error {
- i := f.Addr().Interface()
-
- // Handle protobuf well known types
- type wkt interface {
- XXX_WellKnownType() string
- }
- if wkt, ok := i.(wkt); ok {
- switch wkt.XXX_WellKnownType() {
- case "Timestamp":
- if value == "null" {
- f.Field(0).SetInt(0)
- f.Field(1).SetInt(0)
- return nil
- }
-
- t, err := time.Parse(time.RFC3339Nano, value)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
- f.Field(0).SetInt(int64(t.Unix()))
- f.Field(1).SetInt(int64(t.Nanosecond()))
- return nil
- case "Duration":
- if value == "null" {
- f.Field(0).SetInt(0)
- f.Field(1).SetInt(0)
- return nil
- }
- d, err := time.ParseDuration(value)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
-
- ns := d.Nanoseconds()
- s := ns / 1e9
- ns %= 1e9
- f.Field(0).SetInt(s)
- f.Field(1).SetInt(ns)
- return nil
- case "DoubleValue":
- fallthrough
- case "FloatValue":
- float64Val, err := strconv.ParseFloat(value, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.Field(0).SetFloat(float64Val)
- return nil
- case "Int64Value":
- fallthrough
- case "Int32Value":
- int64Val, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.Field(0).SetInt(int64Val)
- return nil
- case "UInt64Value":
- fallthrough
- case "UInt32Value":
- uint64Val, err := strconv.ParseUint(value, 10, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.Field(0).SetUint(uint64Val)
- return nil
- case "BoolValue":
- if value == "true" {
- f.Field(0).SetBool(true)
- } else if value == "false" {
- f.Field(0).SetBool(false)
- } else {
- return fmt.Errorf("bad BoolValue: %s", value)
- }
- return nil
- case "StringValue":
- f.Field(0).SetString(value)
- return nil
- case "BytesValue":
- bytesVal, err := base64.StdEncoding.DecodeString(value)
- if err != nil {
- return fmt.Errorf("bad BytesValue: %s", value)
- }
- f.Field(0).SetBytes(bytesVal)
- return nil
- }
- }
-
- // Handle google well known types
- if gwkt, ok := i.(proto.Message); ok {
- switch proto.MessageName(gwkt) {
- case "google.protobuf.FieldMask":
- p := f.Field(0)
- for _, v := range strings.Split(value, ",") {
- if v != "" {
- p.Set(reflect.Append(p, reflect.ValueOf(v)))
- }
- }
- return nil
- }
- }
-
- // Handle Time and Duration stdlib types
- switch t := i.(type) {
- case *time.Time:
- pt, err := time.Parse(time.RFC3339Nano, value)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
- *t = pt
- return nil
- case *time.Duration:
- d, err := time.ParseDuration(value)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
- *t = d
- return nil
- }
-
- // is the destination field an enumeration type?
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
- return populateFieldEnum(f, value, enumValMap)
- }
-
- conv, ok := convFromType[f.Kind()]
- if !ok {
- return fmt.Errorf("field type %T is not supported in query parameters", i)
- }
- result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- f.Set(result[0].Convert(f.Type()))
- return nil
-}
-
-func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
- // see if it's an enumeration string
- if enumVal, ok := enumValMap[value]; ok {
- return reflect.ValueOf(enumVal).Convert(t), nil
- }
-
- // check for an integer that matches an enumeration value
- eVal, err := strconv.Atoi(value)
- if err != nil {
- return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
- }
- for _, v := range enumValMap {
- if v == int32(eVal) {
- return reflect.ValueOf(eVal).Convert(t), nil
- }
- }
- return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
-}
-
-func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
- cval, err := convertEnum(value, f.Type(), enumValMap)
- if err != nil {
- return err
- }
- f.Set(cval)
- return nil
-}
-
-func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
- elemType := f.Type().Elem()
- f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
- for i, v := range values {
- result, err := convertEnum(v, elemType, enumValMap)
- if err != nil {
- return err
- }
- f.Index(i).Set(result)
- }
- return nil
-}
-
-var (
- convFromType = map[reflect.Kind]reflect.Value{
- reflect.String: reflect.ValueOf(String),
- reflect.Bool: reflect.ValueOf(Bool),
- reflect.Float64: reflect.ValueOf(Float64),
- reflect.Float32: reflect.ValueOf(Float32),
- reflect.Int64: reflect.ValueOf(Int64),
- reflect.Int32: reflect.ValueOf(Int32),
- reflect.Uint64: reflect.ValueOf(Uint64),
- reflect.Uint32: reflect.ValueOf(Uint32),
- reflect.Slice: reflect.ValueOf(Bytes),
- }
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
deleted file mode 100644
index 7109d7932..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
+++ /dev/null
@@ -1,21 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(default_visibility = ["//visibility:public"])
-
-go_library(
- name = "go_default_library",
- srcs = [
- "doc.go",
- "pattern.go",
- "readerfactory.go",
- "trie.go",
- ],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = ["trie_test.go"],
- embed = [":go_default_library"],
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
deleted file mode 100644
index cf79a4d58..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package utilities provides members for internal use in grpc-gateway.
-package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
deleted file mode 100644
index dfe7de486..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package utilities
-
-// An OpCode is a opcode of compiled path patterns.
-type OpCode int
-
-// These constants are the valid values of OpCode.
-const (
- // OpNop does nothing
- OpNop = OpCode(iota)
- // OpPush pushes a component to stack
- OpPush
- // OpLitPush pushes a component to stack if it matches to the literal
- OpLitPush
- // OpPushM concatenates the remaining components and pushes it to stack
- OpPushM
- // OpConcatN pops N items from stack, concatenates them and pushes it back to stack
- OpConcatN
- // OpCapture pops an item and binds it to the variable
- OpCapture
- // OpEnd is the least positive invalid opcode.
- OpEnd
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
deleted file mode 100644
index 6dd385466..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package utilities
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-)
-
-// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
-// at the start of the stream
-func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
-
- return func() io.Reader {
- return bytes.NewReader(b)
- }, nil
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
deleted file mode 100644
index c2b7b30dd..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package utilities
-
-import (
- "sort"
-)
-
-// DoubleArray is a Double Array implementation of trie on sequences of strings.
-type DoubleArray struct {
- // Encoding keeps an encoding from string to int
- Encoding map[string]int
- // Base is the base array of Double Array
- Base []int
- // Check is the check array of Double Array
- Check []int
-}
-
-// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
-func NewDoubleArray(seqs [][]string) *DoubleArray {
- da := &DoubleArray{Encoding: make(map[string]int)}
- if len(seqs) == 0 {
- return da
- }
-
- encoded := registerTokens(da, seqs)
- sort.Sort(byLex(encoded))
-
- root := node{row: -1, col: -1, left: 0, right: len(encoded)}
- addSeqs(da, encoded, 0, root)
-
- for i := len(da.Base); i > 0; i-- {
- if da.Check[i-1] != 0 {
- da.Base = da.Base[:i]
- da.Check = da.Check[:i]
- break
- }
- }
- return da
-}
-
-func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
- var result [][]int
- for _, seq := range seqs {
- var encoded []int
- for _, token := range seq {
- if _, ok := da.Encoding[token]; !ok {
- da.Encoding[token] = len(da.Encoding)
- }
- encoded = append(encoded, da.Encoding[token])
- }
- result = append(result, encoded)
- }
- for i := range result {
- result[i] = append(result[i], len(da.Encoding))
- }
- return result
-}
-
-type node struct {
- row, col int
- left, right int
-}
-
-func (n node) value(seqs [][]int) int {
- return seqs[n.row][n.col]
-}
-
-func (n node) children(seqs [][]int) []*node {
- var result []*node
- lastVal := int(-1)
- last := new(node)
- for i := n.left; i < n.right; i++ {
- if lastVal == seqs[i][n.col+1] {
- continue
- }
- last.right = i
- last = &node{
- row: i,
- col: n.col + 1,
- left: i,
- }
- result = append(result, last)
- }
- last.right = n.right
- return result
-}
-
-func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
- ensureSize(da, pos)
-
- children := n.children(seqs)
- var i int
- for i = 1; ; i++ {
- ok := func() bool {
- for _, child := range children {
- code := child.value(seqs)
- j := i + code
- ensureSize(da, j)
- if da.Check[j] != 0 {
- return false
- }
- }
- return true
- }()
- if ok {
- break
- }
- }
- da.Base[pos] = i
- for _, child := range children {
- code := child.value(seqs)
- j := i + code
- da.Check[j] = pos + 1
- }
- terminator := len(da.Encoding)
- for _, child := range children {
- code := child.value(seqs)
- if code == terminator {
- continue
- }
- j := i + code
- addSeqs(da, seqs, j, *child)
- }
-}
-
-func ensureSize(da *DoubleArray, i int) {
- for i >= len(da.Base) {
- da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
- da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
- }
-}
-
-type byLex [][]int
-
-func (l byLex) Len() int { return len(l) }
-func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l byLex) Less(i, j int) bool {
- si := l[i]
- sj := l[j]
- var k int
- for k = 0; k < len(si) && k < len(sj); k++ {
- if si[k] < sj[k] {
- return true
- }
- if si[k] > sj[k] {
- return false
- }
- }
- if k < len(sj) {
- return true
- }
- return false
-}
-
-// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
-func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
- if len(da.Base) == 0 {
- return false
- }
-
- var i int
- for _, t := range seq {
- code, ok := da.Encoding[t]
- if !ok {
- break
- }
- j := da.Base[i] + code
- if len(da.Check) <= j || da.Check[j] != i+1 {
- break
- }
- i = j
- }
- j := da.Base[i] + len(da.Encoding)
- if len(da.Check) <= j || da.Check[j] != i+1 {
- return false
- }
- return true
-}
diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go
deleted file mode 100644
index 38aca78e4..000000000
--- a/vendor/github.com/minio/minio-go/functional_tests.go
+++ /dev/null
@@ -1,9868 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "mime/multipart"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- humanize "github.com/dustin/go-humanize"
- minio "github.com/minio/minio-go"
- log "github.com/sirupsen/logrus"
-
- "github.com/minio/minio-go/pkg/encrypt"
-)
-
-const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
-const (
- letterIdxBits = 6 // 6 bits to represent a letter index
- letterIdxMask = 1<= 0; {
- if remain == 0 {
- cache, remain = src.Int63(), letterIdxMax
- }
- if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
- b[i] = letterBytes[idx]
- i--
- }
- cache >>= letterIdxBits
- remain--
- }
- return prefix + string(b[0:30-len(prefix)])
-}
-
-var dataFileMap = map[string]int{
- "datafile-1-b": 1,
- "datafile-10-kB": 10 * humanize.KiByte,
- "datafile-33-kB": 33 * humanize.KiByte,
- "datafile-100-kB": 100 * humanize.KiByte,
- "datafile-1.03-MB": 1056 * humanize.KiByte,
- "datafile-1-MB": 1 * humanize.MiByte,
- "datafile-5-MB": 5 * humanize.MiByte,
- "datafile-6-MB": 6 * humanize.MiByte,
- "datafile-11-MB": 11 * humanize.MiByte,
- "datafile-65-MB": 65 * humanize.MiByte,
-}
-
-func isFullMode() bool {
- return os.Getenv("MINT_MODE") == "full"
-}
-
-func getFuncName() string {
- pc, _, _, _ := runtime.Caller(1)
- return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.")
-}
-
-// Tests bucket re-create errors.
-func testMakeBucketError() {
- region := "eu-central-1"
-
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "MakeBucket(bucketName, region)"
- // initialize logging params
- args := map[string]interface{}{
- "bucketName": "",
- "region": region,
- }
-
- // skipping region functional tests for non s3 runs
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
- return
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, region); err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket Failed", err)
- return
- }
- if err = c.MakeBucket(bucketName, region); err == nil {
- logError(testName, function, args, startTime, "", "Bucket already exists", err)
- return
- }
- // Verify valid error response from server.
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
- minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testMetadataSizeLimit() {
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "opts.UserMetadata": "",
- }
- rand.Seed(startTime.Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client creation failed", err)
- return
- }
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "Make bucket failed", err)
- return
- }
-
- const HeaderSizeLimit = 8 * 1024
- const UserMetadataLimit = 2 * 1024
-
- // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail
- metadata := make(map[string]string)
- metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test")))
- args["metadata"] = fmt.Sprint(metadata)
-
- _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
- if err == nil {
- logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil)
- return
- }
-
- // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail
- metadata = make(map[string]string)
- metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test")))
- args["metadata"] = fmt.Sprint(metadata)
- _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
- if err == nil {
- logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests various bucket supported formats.
-func testMakeBucketRegions() {
- region := "eu-central-1"
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "MakeBucket(bucketName, region)"
- // initialize logging params
- args := map[string]interface{}{
- "bucketName": "",
- "region": region,
- }
-
- // skipping region functional tests for non s3 runs
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
- return
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, region); err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- // Make a new bucket with '.' in its name, in 'us-west-2'. This
- // request is internally staged into a path style instead of
- // virtual host style.
- region = "us-west-2"
- args["region"] = region
- if err = c.MakeBucket(bucketName+".withperiod", region); err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test PutObject using a large data to trigger multipart readat
-func testPutObjectReadAt() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "opts": "objectContentType",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "Make bucket failed", err)
- return
- }
-
- bufSize := dataFileMap["datafile-65-MB"]
- var reader = getDataReader("datafile-65-MB")
- defer reader.Close()
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- // Object content type
- objectContentType := "binary/octet-stream"
- args["objectContentType"] = objectContentType
-
- n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "Get Object failed", err)
- return
- }
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat Object failed", err)
- return
- }
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
- return
- }
- if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "Content types don't match", err)
- return
- }
- if err := r.Close(); err != nil {
- logError(testName, function, args, startTime, "", "Object Close failed", err)
- return
- }
- if err := r.Close(); err == nil {
- logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test PutObject using a large data to trigger multipart readat
-func testPutObjectWithMetadata() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader,size, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
- }
-
- if !isFullMode() {
- ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
- return
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "Make bucket failed", err)
- return
- }
-
- bufSize := dataFileMap["datafile-65-MB"]
- var reader = getDataReader("datafile-65-MB")
- defer reader.Close()
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- // Object custom metadata
- customContentType := "custom/contenttype"
-
- args["metadata"] = map[string][]string{
- "Content-Type": {customContentType},
- }
-
- n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
- ContentType: customContentType})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
- return
- }
- if st.ContentType != customContentType && st.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
- return
- }
- if err := r.Close(); err != nil {
- logError(testName, function, args, startTime, "", "Object Close failed", err)
- return
- }
- if err := r.Close(); err == nil {
- logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testPutObjectWithContentLanguage() {
- // initialize logging params
- objectName := "test-object"
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader, size, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": objectName,
- "size": -1,
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- data := bytes.Repeat([]byte("a"), int(0))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{
- ContentLanguage: "en",
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != 0 {
- logError(testName, function, args, startTime, "", "Expected upload object '0' doesn't match with PutObject return value", err)
- return
- }
-
- objInfo, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
-
- if objInfo.Metadata.Get("Content-Language") != "en" {
- logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put object with streaming signature.
-func testPutObjectStreaming() {
- // initialize logging params
- objectName := "test-object"
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader,size,opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": objectName,
- "size": -1,
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Upload an object.
- sizes := []int64{0, 64*1024 - 1, 64 * 1024}
-
- for _, size := range sizes {
- data := bytes.Repeat([]byte("a"), int(size))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
- return
- }
-
- if n != size {
- logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err)
- return
- }
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object seeker from the end, using whence set to '2'.
-func testGetObjectSeekEnd() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
- return
- }
-
- pos, err := r.Seek(-100, 2)
- if err != nil {
- logError(testName, function, args, startTime, "", "Object Seek failed", err)
- return
- }
- if pos != st.Size-100 {
- logError(testName, function, args, startTime, "", "Incorrect position", err)
- return
- }
- buf2 := make([]byte, 100)
- m, err := io.ReadFull(r, buf2)
- if err != nil {
- logError(testName, function, args, startTime, "", "Error reading through io.ReadFull", err)
- return
- }
- if m != len(buf2) {
- logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err)
- return
- }
- hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
- hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
- if hexBuf1 != hexBuf2 {
- logError(testName, function, args, startTime, "", "Values at same index dont match", err)
- return
- }
- pos, err = r.Seek(-100, 2)
- if err != nil {
- logError(testName, function, args, startTime, "", "Object Seek failed", err)
- return
- }
- if pos != st.Size-100 {
- logError(testName, function, args, startTime, "", "Incorrect position", err)
- return
- }
- if err = r.Close(); err != nil {
- logError(testName, function, args, startTime, "", "ObjectClose failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object reader to not throw error on being closed twice.
-func testGetObjectClosedTwice() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(bufSize))+" got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
- return
- }
- if err := r.Close(); err != nil {
- logError(testName, function, args, startTime, "", "Object Close failed", err)
- return
- }
- if err := r.Close(); err == nil {
- logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test RemoveObjectsWithContext request context cancels after timeout
-func testRemoveObjectsWithContext() {
- // Initialize logging params.
- startTime := time.Now()
- testName := getFuncName()
- function := "RemoveObjectsWithContext(ctx, bucketName, objectsCh)"
- args := map[string]interface{}{
- "bucketName": "",
- }
-
- // Seed random based on current tie.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
-
- // Generate put data.
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
-
- // Multi remove of 20 objects.
- nrObjects := 20
- objectsCh := make(chan string)
- go func() {
- defer close(objectsCh)
- for i := 0; i < nrObjects; i++ {
- objectName := "sample" + strconv.Itoa(i) + ".txt"
- _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- continue
- }
- objectsCh <- objectName
- }
- }()
- // Set context to cancel in 1 nanosecond.
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
- args["ctx"] = ctx
- defer cancel()
-
- // Call RemoveObjectsWithContext API with short timeout.
- errorCh := c.RemoveObjectsWithContext(ctx, bucketName, objectsCh)
- // Check for error.
- select {
- case r := <-errorCh:
- if r.Err == nil {
- logError(testName, function, args, startTime, "", "RemoveObjectsWithContext should fail on short timeout", err)
- return
- }
- }
- // Set context with longer timeout.
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- args["ctx"] = ctx
- defer cancel()
- // Perform RemoveObjectsWithContext with the longer timeout. Expect the removals to succeed.
- errorCh = c.RemoveObjectsWithContext(ctx, bucketName, objectsCh)
- select {
- case r, more := <-errorCh:
- if more || r.Err != nil {
- logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
- return
- }
- }
-
- // Delete all objects and buckets.
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test removing multiple objects with Remove API
-func testRemoveMultipleObjects() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "RemoveObjects(bucketName, objectsCh)"
- args := map[string]interface{}{
- "bucketName": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
-
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
-
- // Multi remove of 1100 objects
- nrObjects := 200
-
- objectsCh := make(chan string)
-
- go func() {
- defer close(objectsCh)
- // Upload objects and send them to objectsCh
- for i := 0; i < nrObjects; i++ {
- objectName := "sample" + strconv.Itoa(i) + ".txt"
- _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- continue
- }
- objectsCh <- objectName
- }
- }()
-
- // Call RemoveObjects API
- errorCh := c.RemoveObjects(bucketName, objectsCh)
-
- // Check if errorCh doesn't receive any error
- select {
- case r, more := <-errorCh:
- if more {
- logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
- return
- }
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests FPutObject of a big file to trigger multipart
-func testFPutObjectMultipart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FPutObject(bucketName, objectName, fileName, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "fileName": "",
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- var fileName = getMintDataDirFilePath("datafile-65-MB")
- if fileName == "" {
- // Make a temp file with minPartSize bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
- if err != nil {
- logError(testName, function, args, startTime, "", "TempFile creation failed", err)
- return
- }
- // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
- logError(testName, function, args, startTime, "", "Copy failed", err)
- return
- }
- if err = file.Close(); err != nil {
- logError(testName, function, args, startTime, "", "File Close failed", err)
- return
- }
- fileName = file.Name()
- args["fileName"] = fileName
- }
- totalSize := dataFileMap["datafile-65-MB"]
- // Set base object name
- objectName := bucketName + "FPutObject" + "-standard"
- args["objectName"] = objectName
-
- objectContentType := "testapplication/octet-stream"
- args["objectContentType"] = objectContentType
-
- // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
- if err != nil {
- logError(testName, function, args, startTime, "", "FPutObject failed", err)
- return
- }
- if n != int64(totalSize) {
- logError(testName, function, args, startTime, "", "FPutObject failed", err)
- return
- }
-
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- objInfo, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Unexpected error", err)
- return
- }
- if objInfo.Size != int64(totalSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err)
- return
- }
- if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests FPutObject with null contentType (default = application/octet-stream)
-func testFPutObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FPutObject(bucketName, objectName, fileName, opts)"
-
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "fileName": "",
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- location := "us-east-1"
-
- // Make a new bucket.
- args["bucketName"] = bucketName
- args["location"] = location
- function = "MakeBucket()bucketName, location"
- err = c.MakeBucket(bucketName, location)
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
- // Use different data in part for multipart tests to check parts are uploaded in correct order.
- var fName = getMintDataDirFilePath("datafile-65-MB")
- if fName == "" {
- // Make a temp file with minPartSize bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
- if err != nil {
- logError(testName, function, args, startTime, "", "TempFile creation failed", err)
- return
- }
-
- // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
- logError(testName, function, args, startTime, "", "File copy failed", err)
- return
- }
- // Close the file pro-actively for windows.
- if err = file.Close(); err != nil {
- logError(testName, function, args, startTime, "", "File close failed", err)
- return
- }
- defer os.Remove(file.Name())
- fName = file.Name()
- }
- totalSize := dataFileMap["datafile-65-MB"]
-
- // Set base object name
- function = "FPutObject(bucketName, objectName, fileName, opts)"
- objectName := bucketName + "FPutObject"
- args["objectName"] = objectName + "-standard"
- args["fileName"] = fName
- args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
-
- // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
-
- if err != nil {
- logError(testName, function, args, startTime, "", "FPutObject failed", err)
- return
- }
- if n != int64(totalSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
- return
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
- args["objectName"] = objectName + "-Octet"
- n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "File close failed", err)
- return
- }
- if n != int64(totalSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
- return
- }
- srcFile, err := os.Open(fName)
- if err != nil {
- logError(testName, function, args, startTime, "", "File open failed", err)
- return
- }
- defer srcFile.Close()
- // Add extension to temp file name
- tmpFile, err := os.Create(fName + ".gtar")
- if err != nil {
- logError(testName, function, args, startTime, "", "File create failed", err)
- return
- }
- defer tmpFile.Close()
- _, err = io.Copy(tmpFile, srcFile)
- if err != nil {
- logError(testName, function, args, startTime, "", "File copy failed", err)
- return
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
- args["objectName"] = objectName + "-GTar"
- args["opts"] = minio.PutObjectOptions{}
- n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "FPutObject failed", err)
- return
- }
- if n != int64(totalSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
- return
- }
-
- // Check headers
- function = "StatObject(bucketName, objectName, opts)"
- args["objectName"] = objectName + "-standard"
- rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
- if rStandard.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
- return
- }
-
- function = "StatObject(bucketName, objectName, opts)"
- args["objectName"] = objectName + "-Octet"
- rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
- if rOctet.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
- return
- }
-
- function = "StatObject(bucketName, objectName, opts)"
- args["objectName"] = objectName + "-GTar"
- rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
- if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- if err = os.Remove(fName + ".gtar"); err != nil {
- logError(testName, function, args, startTime, "", "File remove failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests FPutObjectWithContext request context cancels after timeout
-func testFPutObjectWithContext() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FPutObject(bucketName, objectName, fileName, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "fileName": "",
- "opts": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Upload 1 parts worth of data to use multipart upload.
- // Use different data in part for multipart tests to check parts are uploaded in correct order.
- var fName = getMintDataDirFilePath("datafile-1-MB")
- if fName == "" {
- // Make a temp file with 1 MiB bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
- if err != nil {
- logError(testName, function, args, startTime, "", "TempFile creation failed", err)
- return
- }
-
- // Upload 1 parts to trigger multipart upload
- if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
- logError(testName, function, args, startTime, "", "File copy failed", err)
- return
- }
- // Close the file pro-actively for windows.
- if err = file.Close(); err != nil {
- logError(testName, function, args, startTime, "", "File close failed", err)
- return
- }
- defer os.Remove(file.Name())
- fName = file.Name()
- }
- totalSize := dataFileMap["datafile-1-MB"]
-
- // Set base object name
- objectName := bucketName + "FPutObjectWithContext"
- args["objectName"] = objectName
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
- args["ctx"] = ctx
- defer cancel()
-
- // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream)
- _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
- if err == nil {
- logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
- return
- }
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- defer cancel()
- // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
- n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on long timeout", err)
- return
- }
- if n != int64(totalSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
- return
- }
-
- _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Tests FPutObjectWithContext request context cancels after timeout
-func testFPutObjectWithContextV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "opts": "minio.PutObjectOptions{ContentType:objectContentType}",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Upload 1 parts worth of data to use multipart upload.
- // Use different data in part for multipart tests to check parts are uploaded in correct order.
- var fName = getMintDataDirFilePath("datafile-1-MB")
- if fName == "" {
- // Make a temp file with 1 MiB bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
- if err != nil {
- logError(testName, function, args, startTime, "", "Temp file creation failed", err)
- return
- }
-
- // Upload 1 parts to trigger multipart upload
- if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
- logError(testName, function, args, startTime, "", "File copy failed", err)
- return
- }
-
- // Close the file pro-actively for windows.
- if err = file.Close(); err != nil {
- logError(testName, function, args, startTime, "", "File close failed", err)
- return
- }
- defer os.Remove(file.Name())
- fName = file.Name()
- }
- totalSize := dataFileMap["datafile-1-MB"]
-
- // Set base object name
- objectName := bucketName + "FPutObjectWithContext"
- args["objectName"] = objectName
-
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
- args["ctx"] = ctx
- defer cancel()
-
- // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream)
- _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
- if err == nil {
- logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
- return
- }
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- defer cancel()
- // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
- n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on longer timeout", err)
- return
- }
- if n != int64(totalSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match:wanted"+string(totalSize)+" got "+string(n), err)
- return
- }
-
- _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test validates putObject with context to see if request cancellation is honored.
-func testPutObjectWithContext() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
- args := map[string]interface{}{
- "ctx": "",
- "bucketName": "",
- "objectName": "",
- "opts": "",
- }
- // Instantiate new minio client object.
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Make a new bucket.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
- return
- }
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
- objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
- args["objectName"] = objectName
-
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
- args["ctx"] = ctx
- args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"}
- defer cancel()
-
- _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err == nil {
- logError(testName, function, args, startTime, "", "PutObjectWithContext should fail on short timeout", err)
- return
- }
-
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- args["ctx"] = ctx
-
- defer cancel()
- reader = getDataReader("datafile-33-kB")
- defer reader.Close()
- _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Tests get object ReaderSeeker interface methods.
-func testGetObjectReadSeekFunctional() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer func() {
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- }()
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat object failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
- return
- }
-
- // This following function helps us to compare data from the reader after seek
- // with the data from the original buffer
- cmpData := func(r io.Reader, start, end int) {
- if end-start == 0 {
- return
- }
- buffer := bytes.NewBuffer([]byte{})
- if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "CopyN failed", err)
- return
- }
- }
- if !bytes.Equal(buf[start:end], buffer.Bytes()) {
- logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
- return
- }
- }
-
- // Generic seek error for errors other than io.EOF
- seekErr := errors.New("seek error")
-
- testCases := []struct {
- offset int64
- whence int
- pos int64
- err error
- shouldCmp bool
- start int
- end int
- }{
- // Start from offset 0, fetch data and compare
- {0, 0, 0, nil, true, 0, 0},
- // Start from offset 2048, fetch data and compare
- {2048, 0, 2048, nil, true, 2048, bufSize},
- // Start from offset larger than possible
- {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
- // Move to offset 0 without comparing
- {0, 0, 0, nil, false, 0, 0},
- // Move one step forward and compare
- {1, 1, 1, nil, true, 1, bufSize},
- // Move larger than possible
- {int64(bufSize), 1, 0, seekErr, false, 0, 0},
- // Provide negative offset with CUR_SEEK
- {int64(-1), 1, 0, seekErr, false, 0, 0},
- // Test with whence SEEK_END and with positive offset
- {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
- // Test with whence SEEK_END and with negative offset
- {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
- // Test with whence SEEK_END and with large negative offset
- {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
- }
-
- for i, testCase := range testCases {
- // Perform seek operation
- n, err := r.Seek(testCase.offset, testCase.whence)
- // We expect an error
- if testCase.err == seekErr && err == nil {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err)
- return
- }
- // We expect a specific error
- if testCase.err != seekErr && testCase.err != err {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err)
- return
- }
- // If we expect an error go to the next loop
- if testCase.err != nil {
- continue
- }
- // Check the returned seek pos
- if n != testCase.pos {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err)
- return
- }
- // Compare only if shouldCmp is activated
- if testCase.shouldCmp {
- cmpData(r, testCase.start, testCase.end)
- }
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests get object ReaderAt interface methods.
-func testGetObjectReadAtFunctional() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
- return
- }
-
- // read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- offset := int64(2048)
-
- // read directly
- buf1 := make([]byte, 512)
- buf2 := make([]byte, 512)
- buf3 := make([]byte, 512)
- buf4 := make([]byte, 512)
-
- // Test readAt before stat is called such that objectInfo doesn't change.
- m, err := r.ReadAt(buf1, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf1) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf1, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
- return
- }
-
- m, err = r.ReadAt(buf2, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf2) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf2, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
-
- offset += 512
- m, err = r.ReadAt(buf3, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf3) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf3, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
- m, err = r.ReadAt(buf4, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf4) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf4, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
-
- buf5 := make([]byte, n)
- // Read the whole object.
- m, err = r.ReadAt(buf5, 0)
- if err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- }
- if m != len(buf5) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf, buf5) {
- logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
- return
- }
-
- buf6 := make([]byte, n+1)
- // Read the whole object and beyond.
- _, err = r.ReadAt(buf6, 0)
- if err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test Presigned Post Policy
-func testPresignedPostPolicy() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PresignedPostPolicy(policy)"
- args := map[string]interface{}{
- "policy": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- // Azure requires the key to not start with a number
- metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
- metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
- return
- }
-
- policy := minio.NewPostPolicy()
-
- if err := policy.SetBucket(""); err == nil {
- logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
- return
- }
- if err := policy.SetKey(""); err == nil {
- logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
- return
- }
- if err := policy.SetKeyStartsWith(""); err == nil {
- logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err)
- return
- }
- if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
- logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
- return
- }
- if err := policy.SetContentType(""); err == nil {
- logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
- return
- }
- if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
- logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
- return
- }
- if err := policy.SetUserMetadata("", ""); err == nil {
- logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
- return
- }
-
- policy.SetBucket(bucketName)
- policy.SetKey(objectName)
- policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
- policy.SetContentType("binary/octet-stream")
- policy.SetContentLengthRange(10, 1024*1024)
- policy.SetUserMetadata(metadataKey, metadataValue)
- args["policy"] = policy.String()
-
- presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
- return
- }
-
- var formBuf bytes.Buffer
- writer := multipart.NewWriter(&formBuf)
- for k, v := range formData {
- writer.WriteField(k, v)
- }
-
- // Get a 33KB file to upload and test if set post policy works
- var filePath = getMintDataDirFilePath("datafile-33-kB")
- if filePath == "" {
- // Make a temp file with 33 KB data.
- file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
- if err != nil {
- logError(testName, function, args, startTime, "", "TempFile creation failed", err)
- return
- }
- if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
- logError(testName, function, args, startTime, "", "Copy failed", err)
- return
- }
- if err = file.Close(); err != nil {
- logError(testName, function, args, startTime, "", "File Close failed", err)
- return
- }
- filePath = file.Name()
- }
-
- // add file to post request
- f, err := os.Open(filePath)
- defer f.Close()
- if err != nil {
- logError(testName, function, args, startTime, "", "File open failed", err)
- return
- }
- w, err := writer.CreateFormFile("file", filePath)
- if err != nil {
- logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
- return
- }
-
- _, err = io.Copy(w, f)
- if err != nil {
- logError(testName, function, args, startTime, "", "Copy failed", err)
- return
- }
- writer.Close()
-
- // make post request with correct form data
- res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes()))
- if err != nil {
- logError(testName, function, args, startTime, "", "Http request failed", err)
- return
- }
- defer res.Body.Close()
- if res.StatusCode != http.StatusNoContent {
- logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status))
- return
- }
-
- // expected path should be absolute path of the object
- var scheme string
- if mustParseBool(os.Getenv(enableHTTPS)) {
- scheme = "https://"
- } else {
- scheme = "http://"
- }
-
- expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
- expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
-
- if val, ok := res.Header["Location"]; ok {
- if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
- logError(testName, function, args, startTime, "", "Location in header response is incorrect", err)
- return
- }
- } else {
- logError(testName, function, args, startTime, "", "Location not found in header response", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests copy object
-func testCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(dst, src)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Make a new bucket in 'us-east-1' (destination bucket).
- err = c.MakeBucket(bucketName+"-copy", "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
- return
- }
-
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- // Check the various fields of source object against destination object.
- objInfo, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
-
- // Copy Source
- src := minio.NewSourceInfo(bucketName, objectName, nil)
- args["src"] = src
-
- // Set copy conditions.
-
- // All invalid conditions first.
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
- if err == nil {
- logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
- return
- }
- err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
- if err == nil {
- logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
- return
- }
- err = src.SetMatchETagCond("")
- if err == nil {
- logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
- return
- }
- err = src.SetMatchETagExceptCond("")
- if err == nil {
- logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
- return
- }
-
- err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
- return
- }
- err = src.SetMatchETagCond(objInfo.ETag)
- if err != nil {
- logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
- return
- }
-
- dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
- args["dst"] = dst
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
-
- // Perform the Copy
- err = c.CopyObject(dst, src)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject failed", err)
- return
- }
-
- // Source object
- r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- // Destination object
- readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- // Check the various fields of source object against destination object.
- objInfo, err = r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- objInfoCopy, err := readerCopy.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- if objInfo.Size != objInfoCopy.Size {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err)
- return
- }
-
- // Close all the get readers before proceeding with CopyObject operations.
- r.Close()
- readerCopy.Close()
-
- // CopyObject again but with wrong conditions
- src = minio.NewSourceInfo(bucketName, objectName, nil)
- err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
- return
- }
- err = src.SetMatchETagExceptCond(objInfo.ETag)
- if err != nil {
- logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
- return
- }
-
- // Perform the Copy which should fail
- err = c.CopyObject(dst, src)
- if err == nil {
- logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
- return
- }
-
- // Perform the Copy which should update only metadata.
- src = minio.NewSourceInfo(bucketName, objectName, nil)
- dst, err = minio.NewDestinationInfo(bucketName, objectName, nil, map[string]string{
- "Copy": "should be same",
- })
- args["dst"] = dst
- args["src"] = src
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
-
- err = c.CopyObject(dst, src)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err)
- return
- }
-
- oi, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
-
- stOpts := minio.StatObjectOptions{}
- stOpts.SetMatchETag(oi.ETag)
- objInfo, err = c.StatObject(bucketName, objectName, stOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err)
- return
- }
-
- if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" {
- logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- if err = cleanupBucket(bucketName+"-copy", c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests SSE-C get object ReaderSeeker interface methods.
-func testSSECEncryptedGetObjectReadSeekFunctional() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer func() {
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- }()
-
- // Generate 65MiB of data.
- bufSize := dataFileMap["datafile-65-MB"]
- var reader = getDataReader("datafile-65-MB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
- ContentType: "binary/octet-stream",
- ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{
- ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- defer r.Close()
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat object failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
- return
- }
-
- // This following function helps us to compare data from the reader after seek
- // with the data from the original buffer
- cmpData := func(r io.Reader, start, end int) {
- if end-start == 0 {
- return
- }
- buffer := bytes.NewBuffer([]byte{})
- if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "CopyN failed", err)
- return
- }
- }
- if !bytes.Equal(buf[start:end], buffer.Bytes()) {
- logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
- return
- }
- }
-
- testCases := []struct {
- offset int64
- whence int
- pos int64
- err error
- shouldCmp bool
- start int
- end int
- }{
- // Start from offset 0, fetch data and compare
- {0, 0, 0, nil, true, 0, 0},
- // Start from offset 2048, fetch data and compare
- {2048, 0, 2048, nil, true, 2048, bufSize},
- // Start from offset larger than possible
- {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
- // Move to offset 0 without comparing
- {0, 0, 0, nil, false, 0, 0},
- // Move one step forward and compare
- {1, 1, 1, nil, true, 1, bufSize},
- // Move larger than possible
- {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
- // Provide negative offset with CUR_SEEK
- {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
- // Test with whence SEEK_END and with positive offset
- {1024, 2, 0, io.EOF, false, 0, 0},
- // Test with whence SEEK_END and with negative offset
- {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
- // Test with whence SEEK_END and with large negative offset
- {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
- // Test with invalid whence
- {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
- }
-
- for i, testCase := range testCases {
- // Perform seek operation
- n, err := r.Seek(testCase.offset, testCase.whence)
- if err != nil && testCase.err == nil {
- // We expected success.
- logError(testName, function, args, startTime, "",
- fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
- return
- }
- if err == nil && testCase.err != nil {
- // We expected failure, but got success.
- logError(testName, function, args, startTime, "",
- fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
- return
- }
- if err != nil && testCase.err != nil {
- if err.Error() != testCase.err.Error() {
- // We expect a specific error
- logError(testName, function, args, startTime, "",
- fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
- return
- }
- }
- // Check the returned seek pos
- if n != testCase.pos {
- logError(testName, function, args, startTime, "",
- fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err)
- return
- }
- // Compare only if shouldCmp is activated
- if testCase.shouldCmp {
- cmpData(r, testCase.start, testCase.end)
- }
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests SSE-S3 get object ReaderSeeker interface methods.
-func testSSES3EncryptedGetObjectReadSeekFunctional() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- defer func() {
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- }()
-
- // Generate 65MiB of data.
- bufSize := dataFileMap["datafile-65-MB"]
- var reader = getDataReader("datafile-65-MB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
- ContentType: "binary/octet-stream",
- ServerSideEncryption: encrypt.NewSSE(),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- defer r.Close()
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat object failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
- return
- }
-
- // This following function helps us to compare data from the reader after seek
- // with the data from the original buffer
- cmpData := func(r io.Reader, start, end int) {
- if end-start == 0 {
- return
- }
- buffer := bytes.NewBuffer([]byte{})
- if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "CopyN failed", err)
- return
- }
- }
- if !bytes.Equal(buf[start:end], buffer.Bytes()) {
- logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
- return
- }
- }
-
- testCases := []struct {
- offset int64
- whence int
- pos int64
- err error
- shouldCmp bool
- start int
- end int
- }{
- // Start from offset 0, fetch data and compare
- {0, 0, 0, nil, true, 0, 0},
- // Start from offset 2048, fetch data and compare
- {2048, 0, 2048, nil, true, 2048, bufSize},
- // Start from offset larger than possible
- {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},
- // Move to offset 0 without comparing
- {0, 0, 0, nil, false, 0, 0},
- // Move one step forward and compare
- {1, 1, 1, nil, true, 1, bufSize},
- // Move larger than possible
- {int64(bufSize), 1, 0, io.EOF, false, 0, 0},
- // Provide negative offset with CUR_SEEK
- {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0},
- // Test with whence SEEK_END and with positive offset
- {1024, 2, 0, io.EOF, false, 0, 0},
- // Test with whence SEEK_END and with negative offset
- {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
- // Test with whence SEEK_END and with large negative offset
- {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0},
- // Test with invalid whence
- {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0},
- }
-
- for i, testCase := range testCases {
- // Perform seek operation
- n, err := r.Seek(testCase.offset, testCase.whence)
- if err != nil && testCase.err == nil {
- // We expected success.
- logError(testName, function, args, startTime, "",
- fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
- return
- }
- if err == nil && testCase.err != nil {
- // We expected failure, but got success.
- logError(testName, function, args, startTime, "",
- fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
- return
- }
- if err != nil && testCase.err != nil {
- if err.Error() != testCase.err.Error() {
- // We expect a specific error
- logError(testName, function, args, startTime, "",
- fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err)
- return
- }
- }
- // Check the returned seek pos
- if n != testCase.pos {
- logError(testName, function, args, startTime, "",
- fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err)
- return
- }
- // Compare only if shouldCmp is activated
- if testCase.shouldCmp {
- cmpData(r, testCase.start, testCase.end)
- }
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests SSE-C get object ReaderAt interface methods.
-func testSSECEncryptedGetObjectReadAtFunctional() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 65MiB of data.
- bufSize := dataFileMap["datafile-65-MB"]
- var reader = getDataReader("datafile-65-MB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
- ContentType: "binary/octet-stream",
- ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
- return
- }
-
- // read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{
- ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- defer r.Close()
-
- offset := int64(2048)
-
- // read directly
- buf1 := make([]byte, 512)
- buf2 := make([]byte, 512)
- buf3 := make([]byte, 512)
- buf4 := make([]byte, 512)
-
- // Test readAt before stat is called such that objectInfo doesn't change.
- m, err := r.ReadAt(buf1, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf1) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf1, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
- return
- }
-
- m, err = r.ReadAt(buf2, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf2) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf2, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
- m, err = r.ReadAt(buf3, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf3) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf3, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
- m, err = r.ReadAt(buf4, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf4) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf4, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
-
- buf5 := make([]byte, n)
- // Read the whole object.
- m, err = r.ReadAt(buf5, 0)
- if err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- }
- if m != len(buf5) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf, buf5) {
- logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
- return
- }
-
- buf6 := make([]byte, n+1)
- // Read the whole object and beyond.
- _, err = r.ReadAt(buf6, 0)
- if err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests SSE-S3 get object ReaderAt interface methods.
-func testSSES3EncryptedGetObjectReadAtFunctional() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 65MiB of data.
- bufSize := dataFileMap["datafile-65-MB"]
- var reader = getDataReader("datafile-65-MB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
- ContentType: "binary/octet-stream",
- ServerSideEncryption: encrypt.NewSSE(),
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
- return
- }
-
- // read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- defer r.Close()
-
- offset := int64(2048)
-
- // read directly
- buf1 := make([]byte, 512)
- buf2 := make([]byte, 512)
- buf3 := make([]byte, 512)
- buf4 := make([]byte, 512)
-
- // Test readAt before stat is called such that objectInfo doesn't change.
- m, err := r.ReadAt(buf1, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf1) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf1, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
- return
- }
-
- m, err = r.ReadAt(buf2, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf2) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf2, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
- m, err = r.ReadAt(buf3, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf3) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf3, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
- m, err = r.ReadAt(buf4, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf4) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf4, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
-
- buf5 := make([]byte, n)
- // Read the whole object.
- m, err = r.ReadAt(buf5, 0)
- if err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- }
- if m != len(buf5) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
- return
- }
- if !bytes.Equal(buf, buf5) {
- logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
- return
- }
-
- buf6 := make([]byte, n+1)
- // Read the whole object and beyond.
- _, err = r.ReadAt(buf6, 0)
- if err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// testSSECEncryptionPutGet tests encryption with customer provided encryption keys
-func testSSECEncryptionPutGet() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "sse": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- testCases := []struct {
- buf []byte
- }{
- {buf: bytes.Repeat([]byte("F"), 1)},
- {buf: bytes.Repeat([]byte("F"), 15)},
- {buf: bytes.Repeat([]byte("F"), 16)},
- {buf: bytes.Repeat([]byte("F"), 17)},
- {buf: bytes.Repeat([]byte("F"), 31)},
- {buf: bytes.Repeat([]byte("F"), 32)},
- {buf: bytes.Repeat([]byte("F"), 33)},
- {buf: bytes.Repeat([]byte("F"), 1024)},
- {buf: bytes.Repeat([]byte("F"), 1024*2)},
- {buf: bytes.Repeat([]byte("F"), 1024*1024)},
- }
-
- const password = "correct horse battery staple" // https://xkcd.com/936/
-
- for i, testCase := range testCases {
- // Generate a random object name
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- // Secured object
- sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
- args["sse"] = sse
-
- // Put encrypted data
- _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
- return
- }
- defer r.Close()
-
- // Compare the sent object with the received one
- recvBuffer := bytes.NewBuffer([]byte{})
- if _, err = io.Copy(recvBuffer, r); err != nil {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
- return
- }
- if recvBuffer.Len() != len(testCase.buf) {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
- return
- }
- if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// TestEncryptionFPut tests encryption with customer specified encryption keys
-func testSSECEncryptionFPut() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "filePath": "",
- "contentType": "",
- "sse": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Object custom metadata
- customContentType := "custom/contenttype"
- args["metadata"] = customContentType
-
- testCases := []struct {
- buf []byte
- }{
- {buf: bytes.Repeat([]byte("F"), 0)},
- {buf: bytes.Repeat([]byte("F"), 1)},
- {buf: bytes.Repeat([]byte("F"), 15)},
- {buf: bytes.Repeat([]byte("F"), 16)},
- {buf: bytes.Repeat([]byte("F"), 17)},
- {buf: bytes.Repeat([]byte("F"), 31)},
- {buf: bytes.Repeat([]byte("F"), 32)},
- {buf: bytes.Repeat([]byte("F"), 33)},
- {buf: bytes.Repeat([]byte("F"), 1024)},
- {buf: bytes.Repeat([]byte("F"), 1024*2)},
- {buf: bytes.Repeat([]byte("F"), 1024*1024)},
- }
-
- const password = "correct horse battery staple" // https://xkcd.com/936/
- for i, testCase := range testCases {
- // Generate a random object name
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- // Secured object
- sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
- args["sse"] = sse
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- file, err := os.Create(fileName)
- if err != nil {
- logError(testName, function, args, startTime, "", "file create failed", err)
- return
- }
- _, err = file.Write(testCase.buf)
- if err != nil {
- logError(testName, function, args, startTime, "", "file write failed", err)
- return
- }
- file.Close()
- // Put encrypted data
- if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
- logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
- return
- }
- defer r.Close()
-
- // Compare the sent object with the received one
- recvBuffer := bytes.NewBuffer([]byte{})
- if _, err = io.Copy(recvBuffer, r); err != nil {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
- return
- }
- if recvBuffer.Len() != len(testCase.buf) {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
- return
- }
- if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
- return
- }
-
- if err = os.Remove(fileName); err != nil {
- logError(testName, function, args, startTime, "", "File remove failed", err)
- return
- }
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// testSSES3EncryptionPutGet tests SSE-S3 encryption
-func testSSES3EncryptionPutGet() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutEncryptedObject(bucketName, objectName, reader, sse)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "sse": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- testCases := []struct {
- buf []byte
- }{
- {buf: bytes.Repeat([]byte("F"), 1)},
- {buf: bytes.Repeat([]byte("F"), 15)},
- {buf: bytes.Repeat([]byte("F"), 16)},
- {buf: bytes.Repeat([]byte("F"), 17)},
- {buf: bytes.Repeat([]byte("F"), 31)},
- {buf: bytes.Repeat([]byte("F"), 32)},
- {buf: bytes.Repeat([]byte("F"), 33)},
- {buf: bytes.Repeat([]byte("F"), 1024)},
- {buf: bytes.Repeat([]byte("F"), 1024*2)},
- {buf: bytes.Repeat([]byte("F"), 1024*1024)},
- }
-
- for i, testCase := range testCases {
- // Generate a random object name
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- // Secured object
- sse := encrypt.NewSSE()
- args["sse"] = sse
-
- // Put encrypted data
- _, err = c.PutObject(bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
- return
- }
-
- // Read the data back without any encryption headers
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
- return
- }
- defer r.Close()
-
- // Compare the sent object with the received one
- recvBuffer := bytes.NewBuffer([]byte{})
- if _, err = io.Copy(recvBuffer, r); err != nil {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
- return
- }
- if recvBuffer.Len() != len(testCase.buf) {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
- return
- }
- if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// TestSSES3EncryptionFPut tests server side encryption
-func testSSES3EncryptionFPut() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "filePath": "",
- "contentType": "",
- "sse": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Object custom metadata
- customContentType := "custom/contenttype"
- args["metadata"] = customContentType
-
- testCases := []struct {
- buf []byte
- }{
- {buf: bytes.Repeat([]byte("F"), 0)},
- {buf: bytes.Repeat([]byte("F"), 1)},
- {buf: bytes.Repeat([]byte("F"), 15)},
- {buf: bytes.Repeat([]byte("F"), 16)},
- {buf: bytes.Repeat([]byte("F"), 17)},
- {buf: bytes.Repeat([]byte("F"), 31)},
- {buf: bytes.Repeat([]byte("F"), 32)},
- {buf: bytes.Repeat([]byte("F"), 33)},
- {buf: bytes.Repeat([]byte("F"), 1024)},
- {buf: bytes.Repeat([]byte("F"), 1024*2)},
- {buf: bytes.Repeat([]byte("F"), 1024*1024)},
- }
-
- for i, testCase := range testCases {
- // Generate a random object name
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- // Secured object
- sse := encrypt.NewSSE()
- args["sse"] = sse
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- file, err := os.Create(fileName)
- if err != nil {
- logError(testName, function, args, startTime, "", "file create failed", err)
- return
- }
- _, err = file.Write(testCase.buf)
- if err != nil {
- logError(testName, function, args, startTime, "", "file write failed", err)
- return
- }
- file.Close()
- // Put encrypted data
- if _, err = c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil {
- logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
- return
- }
- defer r.Close()
-
- // Compare the sent object with the received one
- recvBuffer := bytes.NewBuffer([]byte{})
- if _, err = io.Copy(recvBuffer, r); err != nil {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
- return
- }
- if recvBuffer.Len() != len(testCase.buf) {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
- return
- }
- if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
- logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
- return
- }
-
- if err = os.Remove(fileName); err != nil {
- logError(testName, function, args, startTime, "", "File remove failed", err)
- return
- }
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testBucketNotification() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "SetBucketNotification(bucketName)"
- args := map[string]interface{}{
- "bucketName": "",
- }
-
- if os.Getenv("NOTIFY_BUCKET") == "" ||
- os.Getenv("NOTIFY_SERVICE") == "" ||
- os.Getenv("NOTIFY_REGION") == "" ||
- os.Getenv("NOTIFY_ACCOUNTID") == "" ||
- os.Getenv("NOTIFY_RESOURCE") == "" {
- ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info()
- return
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- bucketName := os.Getenv("NOTIFY_BUCKET")
- args["bucketName"] = bucketName
-
- topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
- queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
-
- topicConfig := minio.NewNotificationConfig(topicArn)
-
- topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
- topicConfig.AddFilterSuffix("jpg")
-
- queueConfig := minio.NewNotificationConfig(queueArn)
- queueConfig.AddEvents(minio.ObjectCreatedAll)
- queueConfig.AddFilterPrefix("photos/")
-
- bNotification := minio.BucketNotification{}
- bNotification.AddTopic(topicConfig)
-
- // Add the same topicConfig again, should have no effect
- // because it is duplicated
- bNotification.AddTopic(topicConfig)
- if len(bNotification.TopicConfigs) != 1 {
- logError(testName, function, args, startTime, "", "Duplicate entry added", err)
- return
- }
-
- // Add and remove a queue config
- bNotification.AddQueue(queueConfig)
- bNotification.RemoveQueueByArn(queueArn)
-
- err = c.SetBucketNotification(bucketName, bNotification)
- if err != nil {
- logError(testName, function, args, startTime, "", "SetBucketNotification failed", err)
- return
- }
-
- bNotification, err = c.GetBucketNotification(bucketName)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetBucketNotification failed", err)
- return
- }
-
- if len(bNotification.TopicConfigs) != 1 {
- logError(testName, function, args, startTime, "", "Topic config is empty", err)
- return
- }
-
- if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
- logError(testName, function, args, startTime, "", "Couldn't get the suffix", err)
- return
- }
-
- err = c.RemoveAllBucketNotification(bucketName)
- if err != nil {
- logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests comprehensive list of all methods.
-func testFunctional() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "testFunctional()"
- functionAll := ""
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, nil, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- // Make a new bucket.
- function = "MakeBucket(bucketName, region)"
- functionAll = "MakeBucket(bucketName, region)"
- args["bucketName"] = bucketName
- err = c.MakeBucket(bucketName, "us-east-1")
-
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- file, err := os.Create(fileName)
- if err != nil {
- logError(testName, function, args, startTime, "", "File creation failed", err)
- return
- }
- for i := 0; i < 3; i++ {
- buf := make([]byte, rand.Intn(1<<19))
- _, err = file.Write(buf)
- if err != nil {
- logError(testName, function, args, startTime, "", "File write failed", err)
- return
- }
- }
- file.Close()
-
- // Verify if bucket exits and you have access.
- var exists bool
- function = "BucketExists(bucketName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- }
- exists, err = c.BucketExists(bucketName)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "BucketExists failed", err)
- return
- }
- if !exists {
- logError(testName, function, args, startTime, "", "Could not find the bucket", err)
- return
- }
-
- // Asserting the default bucket policy.
- function = "GetBucketPolicy(bucketName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- }
- nilPolicy, err := c.GetBucketPolicy(bucketName)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
- return
- }
- if nilPolicy != "" {
- logError(testName, function, args, startTime, "", "policy should be set to nil", err)
- return
- }
-
- // Set the bucket policy to 'public readonly'.
- function = "SetBucketPolicy(bucketName, readOnlyPolicy)"
- functionAll += ", " + function
-
- readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
- args = map[string]interface{}{
- "bucketName": bucketName,
- "bucketPolicy": readOnlyPolicy,
- }
-
- err = c.SetBucketPolicy(bucketName, readOnlyPolicy)
- if err != nil {
- logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
- return
- }
- // should return policy `readonly`.
- function = "GetBucketPolicy(bucketName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- }
- _, err = c.GetBucketPolicy(bucketName)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
- return
- }
-
- // Make the bucket 'public writeonly'.
- function = "SetBucketPolicy(bucketName, writeOnlyPolicy)"
- functionAll += ", " + function
-
- writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
- args = map[string]interface{}{
- "bucketName": bucketName,
- "bucketPolicy": writeOnlyPolicy,
- }
- err = c.SetBucketPolicy(bucketName, writeOnlyPolicy)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
- return
- }
- // should return policy `writeonly`.
- function = "GetBucketPolicy(bucketName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- }
-
- _, err = c.GetBucketPolicy(bucketName)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
- return
- }
-
- // Make the bucket 'public read/write'.
- function = "SetBucketPolicy(bucketName, readWritePolicy)"
- functionAll += ", " + function
-
- readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}`
-
- args = map[string]interface{}{
- "bucketName": bucketName,
- "bucketPolicy": readWritePolicy,
- }
- err = c.SetBucketPolicy(bucketName, readWritePolicy)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
- return
- }
- // should return policy `readwrite`.
- function = "GetBucketPolicy(bucketName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- }
- _, err = c.GetBucketPolicy(bucketName)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
- return
- }
-
- // List all buckets.
- function = "ListBuckets()"
- functionAll += ", " + function
- args = nil
- buckets, err := c.ListBuckets()
-
- if len(buckets) == 0 {
- logError(testName, function, args, startTime, "", "Found bucket list to be empty", err)
- return
- }
- if err != nil {
- logError(testName, function, args, startTime, "", "ListBuckets failed", err)
- return
- }
-
- // Verify if previously created bucket is listed in list buckets.
- bucketFound := false
- for _, bucket := range buckets {
- if bucket.Name == bucketName {
- bucketFound = true
- }
- }
-
- // If bucket not found error out.
- if !bucketFound {
- logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err)
- return
- }
-
- objectName := bucketName + "unique"
-
- // Generate data
- buf := bytes.Repeat([]byte("f"), 1<<19)
-
- function = "PutObject(bucketName, objectName, reader, contentType)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "contentType": "",
- }
-
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(len(buf)) {
- logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err)
- return
- }
-
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName + "-nolength",
- "contentType": "binary/octet-stream",
- }
-
- n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(len(buf)) {
- logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err)
- return
- }
-
- // Instantiate a done channel to close all listing.
- doneCh := make(chan struct{})
- defer close(doneCh)
-
- objFound := false
- isRecursive := true // Recursive is true.
-
- function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "isRecursive": isRecursive,
- }
-
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
- return
- }
-
- objFound = false
- isRecursive = true // Recursive is true.
- function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "isRecursive": isRecursive,
- }
-
- for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
- return
- }
-
- incompObjNotFound := true
-
- function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "isRecursive": isRecursive,
- }
-
- for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
- if objIncompl.Key != "" {
- incompObjNotFound = false
- break
- }
- }
- if !incompObjNotFound {
- logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
- return
- }
-
- function = "GetObject(bucketName, objectName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- }
- newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- newReadBytes, err := ioutil.ReadAll(newReader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err)
- return
- }
- newReader.Close()
-
- function = "FGetObject(bucketName, objectName, fileName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "fileName": fileName + "-f",
- }
- err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
-
- if err != nil {
- logError(testName, function, args, startTime, "", "FGetObject failed", err)
- return
- }
-
- function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": "",
- "expires": 3600 * time.Second,
- }
- if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil {
- logError(testName, function, args, startTime, "", "PresignedHeadObject success", err)
- return
- }
-
- // Generate presigned HEAD object url.
- function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "expires": 3600 * time.Second,
- }
- presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
- return
- }
- // Verify if presigned url works.
- resp, err := http.Head(presignedHeadURL.String())
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
- return
- }
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err)
- return
- }
- if resp.Header.Get("ETag") == "" {
- logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
- return
- }
- resp.Body.Close()
-
- function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": "",
- "expires": 3600 * time.Second,
- }
- _, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil)
- if err == nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject success", err)
- return
- }
-
- // Generate presigned GET object url.
- function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "expires": 3600 * time.Second,
- }
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
- return
- }
-
- // Verify if presigned url works.
- resp, err = http.Get(presignedGetURL.String())
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
- return
- }
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
- return
- }
- newPresignedBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
- return
- }
- resp.Body.Close()
- if !bytes.Equal(newPresignedBytes, buf) {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
- return
- }
-
- // Set request parameters.
- reqParams := make(url.Values)
- reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "expires": 3600 * time.Second,
- "reqParams": reqParams,
- }
- presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
- return
- }
- // Verify if presigned url works.
- resp, err = http.Get(presignedGetURL.String())
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
- return
- }
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
- return
- }
- newPresignedBytes, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
- return
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
- return
- }
- if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err)
- return
- }
-
- function = "PresignedPutObject(bucketName, objectName, expires)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": "",
- "expires": 3600 * time.Second,
- }
- _, err = c.PresignedPutObject(bucketName, "", 3600*time.Second)
- if err == nil {
- logError(testName, function, args, startTime, "", "PresignedPutObject success", err)
- return
- }
-
- function = "PresignedPutObject(bucketName, objectName, expires)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName + "-presigned",
- "expires": 3600 * time.Second,
- }
- presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
- return
- }
-
- buf = bytes.Repeat([]byte("g"), 1<<19)
-
- req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
- if err != nil {
- logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err)
- return
- }
- httpClient := &http.Client{
- // Setting a sensible time out of 30secs to wait for response
- // headers. Request is pro-actively cancelled after 30secs
- // with no response.
- Timeout: 30 * time.Second,
- Transport: http.DefaultTransport,
- }
- resp, err = httpClient.Do(req)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
- return
- }
-
- newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err)
- return
- }
-
- newReadBytes, err = ioutil.ReadAll(newReader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
- return
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- logError(testName, function, args, startTime, "", "Bytes mismatch", err)
- return
- }
-
- function = "RemoveObject(bucketName, objectName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- }
- err = c.RemoveObject(bucketName, objectName)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "RemoveObject failed", err)
- return
- }
- args["objectName"] = objectName + "-f"
- err = c.RemoveObject(bucketName, objectName+"-f")
-
- if err != nil {
- logError(testName, function, args, startTime, "", "RemoveObject failed", err)
- return
- }
-
- args["objectName"] = objectName + "-nolength"
- err = c.RemoveObject(bucketName, objectName+"-nolength")
-
- if err != nil {
- logError(testName, function, args, startTime, "", "RemoveObject failed", err)
- return
- }
-
- args["objectName"] = objectName + "-presigned"
- err = c.RemoveObject(bucketName, objectName+"-presigned")
-
- if err != nil {
- logError(testName, function, args, startTime, "", "RemoveObject failed", err)
- return
- }
-
- function = "RemoveBucket(bucketName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- }
- err = c.RemoveBucket(bucketName)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
- return
- }
- err = c.RemoveBucket(bucketName)
- if err == nil {
- logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err)
- return
- }
- if err.Error() != "The specified bucket does not exist" {
- logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
- return
- }
-
- if err = os.Remove(fileName); err != nil {
- logError(testName, function, args, startTime, "", "File Remove failed", err)
- return
- }
- if err = os.Remove(fileName + "-f"); err != nil {
- logError(testName, function, args, startTime, "", "File Remove failed", err)
- return
- }
- successLogger(testName, functionAll, args, startTime).Info()
-}
-
-// Test for validating GetObject Reader* methods functioning when the
-// object is modified in the object store.
-func testGetObjectModified() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object.
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
-
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Make a new bucket.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
- defer c.RemoveBucket(bucketName)
-
- // Upload an object.
- objectName := "myobject"
- args["objectName"] = objectName
- content := "helloworld"
- _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"})
- if err != nil {
- logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
- return
- }
-
- defer c.RemoveObject(bucketName, objectName)
-
- reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err)
- return
- }
- defer reader.Close()
-
- // Read a few bytes of the object.
- b := make([]byte, 5)
- n, err := reader.ReadAt(b, 0)
- if err != nil {
- logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err)
- return
- }
-
- // Upload different contents to the same object while object is being read.
- newContent := "goodbyeworld"
- _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"})
- if err != nil {
- logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
- return
- }
-
- // Confirm that a Stat() call in between doesn't change the Object's cached etag.
- _, err = reader.Stat()
- expectedError := "At least one of the pre-conditions you specified did not hold"
- if err.Error() != expectedError {
- logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err)
- return
- }
-
- // Read again only to find object contents have been modified since last read.
- _, err = reader.ReadAt(b, int64(n))
- if err.Error() != expectedError {
- logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test validates putObject to upload a file seeked at a given offset.
-func testPutObjectUploadSeekedObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "fileToUpload": "",
- "contentType": "binary/octet-stream",
- }
-
- // Instantiate new minio client object.
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Make a new bucket.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
- defer c.RemoveBucket(bucketName)
-
- var tempfile *os.File
-
- if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
- tempfile, err = os.Open(fileName)
- if err != nil {
- logError(testName, function, args, startTime, "", "File open failed", err)
- return
- }
- args["fileToUpload"] = fileName
- } else {
- tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
- if err != nil {
- logError(testName, function, args, startTime, "", "TempFile create failed", err)
- return
- }
- args["fileToUpload"] = tempfile.Name()
-
- // Generate 100kB data
- if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
- logError(testName, function, args, startTime, "", "File copy failed", err)
- return
- }
-
- defer os.Remove(tempfile.Name())
-
- // Seek back to the beginning of the file.
- tempfile.Seek(0, 0)
- }
- var length = 100 * humanize.KiByte
- objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
- args["objectName"] = objectName
-
- offset := length / 2
- if _, err = tempfile.Seek(int64(offset), 0); err != nil {
- logError(testName, function, args, startTime, "", "TempFile seek failed", err)
- return
- }
-
- n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- if n != int64(length-offset) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err)
- return
- }
- tempfile.Close()
-
- obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- defer obj.Close()
-
- n, err = obj.Seek(int64(offset), 0)
- if err != nil {
- logError(testName, function, args, startTime, "", "Seek failed", err)
- return
- }
- if n != int64(offset) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
- return
- }
-
- n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- if n != int64(length-offset) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests bucket re-create errors.
-func testMakeBucketErrorV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "MakeBucket(bucketName, region)"
- args := map[string]interface{}{
- "bucketName": "",
- "region": "eu-west-1",
- }
-
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
- return
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- region := "eu-west-1"
- args["bucketName"] = bucketName
- args["region"] = region
-
- // Make a new bucket in 'eu-west-1'.
- if err = c.MakeBucket(bucketName, region); err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
- if err = c.MakeBucket(bucketName, region); err == nil {
- logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err)
- return
- }
- // Verify valid error response from server.
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
- minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object reader to not throw error on being closed twice.
-func testGetObjectClosedTwiceV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "MakeBucket(bucketName, region)"
- args := map[string]interface{}{
- "bucketName": "",
- "region": "eu-west-1",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
- return
- }
- if err := r.Close(); err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- if err := r.Close(); err == nil {
- logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests FPutObject hidden contentType setting
-func testFPutObjectV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FPutObject(bucketName, objectName, fileName, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "fileName": "",
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Make a temp file with 11*1024*1024 bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
- if err != nil {
- logError(testName, function, args, startTime, "", "TempFile creation failed", err)
- return
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
- n, err := io.CopyN(file, r, 11*1024*1024)
- if err != nil {
- logError(testName, function, args, startTime, "", "Copy failed", err)
- return
- }
- if n != int64(11*1024*1024) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
- return
- }
-
- // Close the file pro-actively for windows.
- err = file.Close()
- if err != nil {
- logError(testName, function, args, startTime, "", "File close failed", err)
- return
- }
-
- // Set base object name
- objectName := bucketName + "FPutObject"
- args["objectName"] = objectName
- args["fileName"] = file.Name()
-
- // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "FPutObject failed", err)
- return
- }
- if n != int64(11*1024*1024) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
- return
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
- args["objectName"] = objectName + "-Octet"
- args["contentType"] = ""
-
- n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "FPutObject failed", err)
- return
- }
- if n != int64(11*1024*1024) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
- return
- }
-
- // Add extension to temp file name
- fileName := file.Name()
- err = os.Rename(file.Name(), fileName+".gtar")
- if err != nil {
- logError(testName, function, args, startTime, "", "Rename failed", err)
- return
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
- args["objectName"] = objectName + "-Octet"
- args["contentType"] = ""
- args["fileName"] = fileName + ".gtar"
-
- n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "FPutObject failed", err)
- return
- }
- if n != int64(11*1024*1024) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
- return
- }
-
- // Check headers
- rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
- if rStandard.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
- return
- }
-
- rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
- if rOctet.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
- return
- }
-
- rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
- if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" {
- logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- err = os.Remove(fileName + ".gtar")
- if err != nil {
- logError(testName, function, args, startTime, "", "File remove failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests various bucket supported formats.
-func testMakeBucketRegionsV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "MakeBucket(bucketName, region)"
- args := map[string]interface{}{
- "bucketName": "",
- "region": "eu-west-1",
- }
-
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
- return
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- // Make a new bucket with '.' in its name, in 'us-west-2'. This
- // request is internally staged into a path style instead of
- // virtual host style.
- if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
- args["bucketName"] = bucketName + ".withperiod"
- args["region"] = "us-west-2"
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests get object ReaderSeeker interface methods.
-func testGetObjectReadSeekFunctionalV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data.
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- defer r.Close()
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
- return
- }
-
- offset := int64(2048)
- n, err = r.Seek(offset, 0)
- if err != nil {
- logError(testName, function, args, startTime, "", "Seek failed", err)
- return
- }
- if n != offset {
- logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
- return
- }
- n, err = r.Seek(0, 1)
- if err != nil {
- logError(testName, function, args, startTime, "", "Seek failed", err)
- return
- }
- if n != offset {
- logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
- return
- }
- _, err = r.Seek(offset, 2)
- if err == nil {
- logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
- return
- }
- n, err = r.Seek(-offset, 2)
- if err != nil {
- logError(testName, function, args, startTime, "", "Seek failed", err)
- return
- }
- if n != st.Size-offset {
- logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err)
- return
- }
-
- var buffer1 bytes.Buffer
- if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "Copy failed", err)
- return
- }
- }
- if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
- logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
- return
- }
-
- // Seek again and read again.
- n, err = r.Seek(offset-1, 0)
- if err != nil {
- logError(testName, function, args, startTime, "", "Seek failed", err)
- return
- }
- if n != (offset - 1) {
- logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err)
- return
- }
-
- var buffer2 bytes.Buffer
- if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "Copy failed", err)
- return
- }
- }
- // Verify now lesser bytes.
- if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
- logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests get object ReaderAt interface methods.
-func testGetObjectReadAtFunctionalV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- buf, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
-
- // Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err)
- return
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- defer r.Close()
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
-
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
- return
- }
-
- offset := int64(2048)
-
- // Read directly
- buf2 := make([]byte, 512)
- buf3 := make([]byte, 512)
- buf4 := make([]byte, 512)
-
- m, err := r.ReadAt(buf2, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf2) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err)
- return
- }
- if !bytes.Equal(buf2, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
- m, err = r.ReadAt(buf3, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf3) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err)
- return
- }
- if !bytes.Equal(buf3, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
- offset += 512
- m, err = r.ReadAt(buf4, offset)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- if m != len(buf4) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err)
- return
- }
- if !bytes.Equal(buf4, buf[offset:offset+512]) {
- logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
- return
- }
-
- buf5 := make([]byte, n)
- // Read the whole object.
- m, err = r.ReadAt(buf5, 0)
- if err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- }
- if m != len(buf5) {
- logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err)
- return
- }
- if !bytes.Equal(buf, buf5) {
- logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
- return
- }
-
- buf6 := make([]byte, n+1)
- // Read the whole object and beyond.
- _, err = r.ReadAt(buf6, 0)
- if err != nil {
- if err != io.EOF {
- logError(testName, function, args, startTime, "", "ReadAt failed", err)
- return
- }
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests copy object
-func testCopyObjectV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Make a new bucket in 'us-east-1' (destination bucket).
- err = c.MakeBucket(bucketName+"-copy", "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate 33K of data.
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
- return
- }
-
- r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- // Check the various fields of source object against destination object.
- objInfo, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- r.Close()
-
- // Copy Source
- src := minio.NewSourceInfo(bucketName, objectName, nil)
- args["source"] = src
-
- // Set copy conditions.
-
- // All invalid conditions first.
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
- if err == nil {
- logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
- return
- }
- err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
- if err == nil {
- logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
- return
- }
- err = src.SetMatchETagCond("")
- if err == nil {
- logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
- return
- }
- err = src.SetMatchETagExceptCond("")
- if err == nil {
- logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
- return
- }
-
- err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
- return
- }
- err = src.SetMatchETagCond(objInfo.ETag)
- if err != nil {
- logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
- return
- }
-
- dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
- args["destination"] = dst
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
-
- // Perform the Copy
- err = c.CopyObject(dst, src)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject failed", err)
- return
- }
-
- // Source object
- r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- // Destination object
- readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- // Check the various fields of source object against destination object.
- objInfo, err = r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- objInfoCopy, err := readerCopy.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- if objInfo.Size != objInfoCopy.Size {
- logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err)
- return
- }
-
- // Close all the readers.
- r.Close()
- readerCopy.Close()
-
- // CopyObject again but with wrong conditions
- src = minio.NewSourceInfo(bucketName, objectName, nil)
- err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
- return
- }
- err = src.SetMatchETagExceptCond(objInfo.ETag)
- if err != nil {
- logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
- return
- }
-
- // Perform the Copy which should fail
- err = c.CopyObject(dst, src)
- if err == nil {
- logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- if err = cleanupBucket(bucketName+"-copy", c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testComposeObjectErrorCasesWrapper(c *minio.Client) {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{}
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err := c.MakeBucket(bucketName, "us-east-1")
-
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Test that more than 10K source objects cannot be
- // concatenated.
- srcArr := [10001]minio.SourceInfo{}
- srcSlice := srcArr[:]
- dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
-
- args["destination"] = dst
- // Just explain about srcArr in args["sourceList"]
- // to stop having 10,001 null headers logged
- args["sourceList"] = "source array of 10,001 elements"
- if err := c.ComposeObject(dst, srcSlice); err == nil {
- logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
- return
- } else if err.Error() != "There must be as least one and up to 10000 source objects." {
- logError(testName, function, args, startTime, "", "Got unexpected error", err)
- return
- }
-
- // Create a source with invalid offset spec and check that
- // error is returned:
- // 1. Create the source object.
- const badSrcSize = 5 * 1024 * 1024
- buf := bytes.Repeat([]byte("1"), badSrcSize)
- _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- // 2. Set invalid range spec on the object (going beyond
- // object size)
- badSrc := minio.NewSourceInfo(bucketName, "badObject", nil)
- err = badSrc.SetRange(1, badSrcSize)
- if err != nil {
- logError(testName, function, args, startTime, "", "Setting NewSourceInfo failed", err)
- return
- }
- // 3. ComposeObject call should fail.
- if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil {
- logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
- return
- } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
- logError(testName, function, args, startTime, "", "Got invalid error", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test expected error cases
-func testComposeObjectErrorCasesV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- testComposeObjectErrorCasesWrapper(c)
-}
-
-func testComposeMultipleSources(c *minio.Client) {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{
- "destination": "",
- "sourceList": "",
- }
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- // Make a new bucket in 'us-east-1' (source bucket).
- err := c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Upload a small source object
- const srcSize = 1024 * 1024 * 5
- buf := bytes.Repeat([]byte("1"), srcSize)
- _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- // We will append 10 copies of the object.
- srcs := []minio.SourceInfo{}
- for i := 0; i < 10; i++ {
- srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil))
- }
- // make the last part very small
- err = srcs[9].SetRange(0, 0)
- if err != nil {
- logError(testName, function, args, startTime, "", "SetRange failed", err)
- return
- }
- args["sourceList"] = srcs
-
- dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil)
- args["destination"] = dst
-
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
- err = c.ComposeObject(dst, srcs)
- if err != nil {
- logError(testName, function, args, startTime, "", "ComposeObject failed", err)
- return
- }
-
- objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject failed", err)
- return
- }
-
- if objProps.Size != 9*srcSize+1 {
- logError(testName, function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test concatenating multiple objects objects
-func testCompose10KSourcesV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- testComposeMultipleSources(c)
-}
-
-func testEncryptedEmptyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
-
- // 1. create an sse-c encrypted object to copy by uploading
- const srcSize = 0
- var buf []byte // Empty buffer
- args["objectName"] = "object"
- _, err = c.PutObject(bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- return
- }
-
- // 2. Test CopyObject for an empty object
- dstInfo, err := minio.NewDestinationInfo(bucketName, "new-object", sse, nil)
- if err != nil {
- args["objectName"] = "new-object"
- function = "NewDestinationInfo(bucketName, objectName, sse, userMetadata)"
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
- srcInfo := minio.NewSourceInfo(bucketName, "object", sse)
- if err = c.CopyObject(dstInfo, srcInfo); err != nil {
- function = "CopyObject(dstInfo, srcInfo)"
- logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
- return
- }
-
- // 3. Test Key rotation
- newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object"))
- dstInfo, err = minio.NewDestinationInfo(bucketName, "new-object", newSSE, nil)
- if err != nil {
- args["objectName"] = "new-object"
- function = "NewDestinationInfo(bucketName, objectName, encryptSSEC, userMetadata)"
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
-
- srcInfo = minio.NewSourceInfo(bucketName, "new-object", sse)
- if err = c.CopyObject(dstInfo, srcInfo); err != nil {
- function = "CopyObject(dstInfo, srcInfo)"
- logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err)
- return
- }
-
- // 4. Download the object.
- reader, err := c.GetObject(bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- defer reader.Close()
-
- decBytes, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
- return
- }
- if !bytes.Equal(decBytes, buf) {
- logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err)
- return
- }
- // Delete all objects and buckets
- delete(args, "objectName")
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
- var srcEncryption, dstEncryption encrypt.ServerSide
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err := c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // 1. create an sse-c encrypted object to copy by uploading
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
- _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
- ServerSideEncryption: sseSrc,
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- return
- }
-
- if sseSrc != nil && sseSrc.Type() != encrypt.S3 {
- srcEncryption = sseSrc
- }
-
- // 2. copy object and change encryption key
- src := minio.NewSourceInfo(bucketName, "srcObject", srcEncryption)
- args["source"] = src
- dst, err := minio.NewDestinationInfo(bucketName, "dstObject", sseDst, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
- args["destination"] = dst
-
- err = c.CopyObject(dst, src)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject failed", err)
- return
- }
-
- if sseDst != nil && sseDst.Type() != encrypt.S3 {
- dstEncryption = sseDst
- }
- // 3. get copied object and check if content is equal
- coreClient := minio.Core{c}
- reader, _, err := coreClient.GetObject(bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- decBytes, err := ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- if !bytes.Equal(decBytes, buf) {
- logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
- return
- }
- reader.Close()
-
- // Test key rotation for source object in-place.
- var newSSE encrypt.ServerSide
- if sseSrc != nil && sseSrc.Type() == encrypt.SSEC {
- newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key
- }
- if sseSrc != nil && sseSrc.Type() == encrypt.S3 {
- newSSE = encrypt.NewSSE()
- }
- if newSSE != nil {
- dst, err = minio.NewDestinationInfo(bucketName, "srcObject", newSSE, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
- args["destination"] = dst
-
- err = c.CopyObject(dst, src)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject failed", err)
- return
- }
-
- // Get copied object and check if content is equal
- reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- decBytes, err = ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- if !bytes.Equal(decBytes, buf) {
- logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
- return
- }
- reader.Close()
- // Test in-place decryption.
- dst, err = minio.NewDestinationInfo(bucketName, "srcObject", nil, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
- args["destination"] = dst
-
- src = minio.NewSourceInfo(bucketName, "srcObject", newSSE)
- args["source"] = src
- err = c.CopyObject(dst, src)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err)
- return
- }
- }
-
- // Get copied decrypted object and check if content is equal
- reader, _, err = coreClient.GetObject(bucketName, "srcObject", minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- defer reader.Close()
-
- decBytes, err = ioutil.ReadAll(reader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- if !bytes.Equal(decBytes, buf) {
- logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test encrypted copy object
-func testUnencryptedToSSECCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- var sseSrc encrypt.ServerSide
- sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testUnencryptedToSSES3CopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- var sseSrc encrypt.ServerSide
- sseDst := encrypt.NewSSE()
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testUnencryptedToUnencryptedCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- var sseSrc, sseDst encrypt.ServerSide
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedSSECToSSECCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
- sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedSSECToSSES3CopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
- sseDst := encrypt.NewSSE()
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedSSECToUnencryptedCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
- var sseDst encrypt.ServerSide
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedSSES3ToSSECCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.NewSSE()
- sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedSSES3ToSSES3CopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.NewSSE()
- sseDst := encrypt.NewSSE()
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedSSES3ToUnencryptedCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.NewSSE()
- var sseDst encrypt.ServerSide
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-// Test encrypted copy object
-func testEncryptedCopyObjectV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
- sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
- sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
-}
-
-func testDecryptedCopyObject() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
- return
- }
-
- bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object"
- if err = c.MakeBucket(bucketName, "us-east-1"); err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName))
- _, err = c.PutObject(bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{
- ServerSideEncryption: encryption,
- })
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- return
- }
-
- src := minio.NewSourceInfo(bucketName, objectName, encrypt.SSECopy(encryption))
- args["source"] = src
- dst, err := minio.NewDestinationInfo(bucketName, "decrypted-"+objectName, nil, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
- args["destination"] = dst
-
- if err = c.CopyObject(dst, src); err != nil {
- logError(testName, function, args, startTime, "", "CopyObject failed", err)
- return
- }
- if _, err = c.GetObject(bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test Core CopyObjectPart implementation
-func testSSECEncryptedToSSECCopyObjectPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- password := "correct horse battery staple"
- srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
-
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, srcencryption)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
- dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- encrypt.SSECopy(srcencryption).Marshal(header)
- dstencryption.Marshal(header)
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-
-// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy
-func testSSECEncryptedToUnencryptedCopyPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- password := "correct horse battery staple"
- srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
-
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, srcencryption)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
- var dstencryption encrypt.ServerSide
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- encrypt.SSECopy(srcencryption).Marshal(header)
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-
-// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy
-func testSSECEncryptedToSSES3CopyObjectPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- password := "correct horse battery staple"
- srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))
-
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, srcencryption)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
- dstencryption := encrypt.NewSSE()
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- encrypt.SSECopy(srcencryption).Marshal(header)
- dstencryption.Marshal(header)
-
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-
-// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part
-func testUnencryptedToSSECCopyObjectPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- password := "correct horse battery staple"
-
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
- dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- dstencryption.Marshal(header)
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-
-// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy
-func testUnencryptedToUnencryptedCopyPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-
-// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy
-func testUnencryptedToSSES3CopyObjectPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
- dstencryption := encrypt.NewSSE()
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- dstencryption.Marshal(header)
-
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-
-// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part
-func testSSES3EncryptedToSSECCopyObjectPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- password := "correct horse battery staple"
- srcEncryption := encrypt.NewSSE()
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, srcEncryption)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
- dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName))
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- dstencryption.Marshal(header)
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-
-// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy
-func testSSES3EncryptedToUnencryptedCopyPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- srcEncryption := encrypt.NewSSE()
-
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, srcEncryption)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-
-// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy
-func testSSES3EncryptedToSSES3CopyObjectPart() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObjectPart(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- client, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Instantiate new core client object.
- c := minio.Core{client}
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- }
- defer cleanupBucket(bucketName, client)
- // Make a buffer with 5MB of data
- buf := bytes.Repeat([]byte("abcde"), 1024*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- srcEncryption := encrypt.NewSSE()
-
- objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
- "Content-Type": "binary/octet-stream",
- }, srcEncryption)
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- }
-
- if objInfo.Size != int64(len(buf)) {
- logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err)
- }
-
- destBucketName := bucketName
- destObjectName := objectName + "-dest"
- dstencryption := encrypt.NewSSE()
-
- uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
- }
-
- // Content of the destination object will be two copies of
- // `objectName` concatenated, followed by first byte of
- // `objectName`.
- metadata := make(map[string]string)
- header := make(http.Header)
- dstencryption.Marshal(header)
-
- for k, v := range header {
- metadata[k] = v[0]
- }
- // First of three parts
- fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Second of three parts
- sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Last of three parts
- lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err)
- }
-
- // Complete the multipart upload
- _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart})
- if err != nil {
- logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err)
- }
-
- // Stat the object and check its length matches
- objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{}})
- if err != nil {
- logError(testName, function, args, startTime, "", "StatObject call failed", err)
- }
-
- if objInfo.Size != (5*1024*1024)*2+1 {
- logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err)
- }
-
- // Now we read the data back
- getOpts := minio.GetObjectOptions{}
- getOpts.SetRange(0, 5*1024*1024-1)
- r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf := make([]byte, 5*1024*1024)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf, buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err)
- }
-
- getOpts.SetRange(5*1024*1024, 0)
- r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject call failed", err)
- }
- getBuf = make([]byte, 5*1024*1024+1)
- _, err = io.ReadFull(r, getBuf)
- if err != nil {
- logError(testName, function, args, startTime, "", "Read buffer failed", err)
- }
- if !bytes.Equal(getBuf[:5*1024*1024], buf) {
- logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err)
- }
- if getBuf[5*1024*1024] != buf[0] {
- logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err)
- }
-
- successLogger(testName, function, args, startTime).Info()
-
- // Do not need to remove destBucketName its same as bucketName.
-}
-func testUserMetadataCopying() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- // c.TraceOn(os.Stderr)
- testUserMetadataCopyingWrapper(c)
-}
-
-func testUserMetadataCopyingWrapper(c *minio.Client) {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- // Make a new bucket in 'us-east-1' (source bucket).
- err := c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- fetchMeta := func(object string) (h http.Header) {
- objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- h = make(http.Header)
- for k, vs := range objInfo.Metadata {
- if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
- for _, v := range vs {
- h.Add(k, v)
- }
- }
- }
- return h
- }
-
- // 1. create a client encrypted object to copy by uploading
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
- metadata := make(http.Header)
- metadata.Set("x-amz-meta-myheader", "myvalue")
- m := make(map[string]string)
- m["x-amz-meta-myheader"] = "myvalue"
- _, err = c.PutObject(bucketName, "srcObject",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err)
- return
- }
- if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
- logError(testName, function, args, startTime, "", "Metadata match failed", err)
- return
- }
-
- // 2. create source
- src := minio.NewSourceInfo(bucketName, "srcObject", nil)
- // 2.1 create destination with metadata set
- dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
-
- // 3. Check that copying to an object with metadata set resets
- // the headers on the copy.
- args["source"] = src
- args["destination"] = dst1
- err = c.CopyObject(dst1, src)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject failed", err)
- return
- }
-
- expectedHeaders := make(http.Header)
- expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
- if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
- logError(testName, function, args, startTime, "", "Metadata match failed", err)
- return
- }
-
- // 4. create destination with no metadata set and same source
- dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
- src = minio.NewSourceInfo(bucketName, "srcObject", nil)
-
- // 5. Check that copying to an object with no metadata set,
- // copies metadata.
- args["source"] = src
- args["destination"] = dst2
- err = c.CopyObject(dst2, src)
- if err != nil {
- logError(testName, function, args, startTime, "", "CopyObject failed", err)
- return
- }
-
- expectedHeaders = metadata
- if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
- logError(testName, function, args, startTime, "", "Metadata match failed", err)
- return
- }
-
- // 6. Compose a pair of sources.
- srcs := []minio.SourceInfo{
- minio.NewSourceInfo(bucketName, "srcObject", nil),
- minio.NewSourceInfo(bucketName, "srcObject", nil),
- }
- dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
-
- function = "ComposeObject(destination, sources)"
- args["source"] = srcs
- args["destination"] = dst3
- err = c.ComposeObject(dst3, srcs)
- if err != nil {
- logError(testName, function, args, startTime, "", "ComposeObject failed", err)
- return
- }
-
- // Check that no headers are copied in this case
- if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
- logError(testName, function, args, startTime, "", "Metadata match failed", err)
- return
- }
-
- // 7. Compose a pair of sources with dest user metadata set.
- srcs = []minio.SourceInfo{
- minio.NewSourceInfo(bucketName, "srcObject", nil),
- minio.NewSourceInfo(bucketName, "srcObject", nil),
- }
- dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
- if err != nil {
- logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
- return
- }
-
- function = "ComposeObject(destination, sources)"
- args["source"] = srcs
- args["destination"] = dst4
- err = c.ComposeObject(dst4, srcs)
- if err != nil {
- logError(testName, function, args, startTime, "", "ComposeObject failed", err)
- return
- }
-
- // Check that no headers are copied in this case
- expectedHeaders = make(http.Header)
- expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
- if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
- logError(testName, function, args, startTime, "", "Metadata match failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testUserMetadataCopyingV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "CopyObject(destination, source)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
- return
- }
-
- // c.TraceOn(os.Stderr)
- testUserMetadataCopyingWrapper(c)
-}
-
-func testStorageClassMetadataPutObject() {
- // initialize logging params
- startTime := time.Now()
- function := "testStorageClassMetadataPutObject()"
- args := map[string]interface{}{}
- testName := getFuncName()
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- fetchMeta := func(object string) (h http.Header) {
- objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- h = make(http.Header)
- for k, vs := range objInfo.Metadata {
- if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
- for _, v := range vs {
- h.Add(k, v)
- }
- }
- }
- return h
- }
-
- metadata := make(http.Header)
- metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
- emptyMetadata := make(http.Header)
-
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB
-
- _, err = c.PutObject(bucketName, "srcObjectRRSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- // Get the returned metadata
- returnedMeta := fetchMeta("srcObjectRRSClass")
-
- // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
- if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
- logError(testName, function, args, startTime, "", "Metadata match failed", err)
- return
- }
-
- metadata = make(http.Header)
- metadata.Set("x-amz-storage-class", "STANDARD")
-
- _, err = c.PutObject(bucketName, "srcObjectSSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
- logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testStorageClassInvalidMetadataPutObject() {
- // initialize logging params
- startTime := time.Now()
- function := "testStorageClassInvalidMetadataPutObject()"
- args := map[string]interface{}{}
- testName := getFuncName()
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB
-
- _, err = c.PutObject(bucketName, "srcObjectRRSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
- if err == nil {
- logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-func testStorageClassMetadataCopyObject() {
- // initialize logging params
- startTime := time.Now()
- function := "testStorageClassMetadataCopyObject()"
- args := map[string]interface{}{}
- testName := getFuncName()
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
- return
- }
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- fetchMeta := func(object string) (h http.Header) {
- objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "Stat failed", err)
- return
- }
- h = make(http.Header)
- for k, vs := range objInfo.Metadata {
- if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
- for _, v := range vs {
- h.Add(k, v)
- }
- }
- }
- return h
- }
-
- metadata := make(http.Header)
- metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
- emptyMetadata := make(http.Header)
-
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize)
-
- // Put an object with RRS Storage class
- _, err = c.PutObject(bucketName, "srcObjectRRSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- // Make server side copy of object uploaded in previous step
- src := minio.NewSourceInfo(bucketName, "srcObjectRRSClass", nil)
- dst, err := minio.NewDestinationInfo(bucketName, "srcObjectRRSClassCopy", nil, nil)
- c.CopyObject(dst, src)
-
- // Get the returned metadata
- returnedMeta := fetchMeta("srcObjectRRSClassCopy")
-
- // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
- if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
- logError(testName, function, args, startTime, "", "Metadata match failed", err)
- return
- }
-
- metadata = make(http.Header)
- metadata.Set("x-amz-storage-class", "STANDARD")
-
- // Put an object with Standard Storage class
- _, err = c.PutObject(bucketName, "srcObjectSSClass",
- bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- // Make server side copy of object uploaded in previous step
- src = minio.NewSourceInfo(bucketName, "srcObjectSSClass", nil)
- dst, err = minio.NewDestinationInfo(bucketName, "srcObjectSSClassCopy", nil, nil)
- c.CopyObject(dst, src)
-
- // Fetch the meta data of copied object
- if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) {
- logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put object with size -1 byte object.
-func testPutObjectNoLengthV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader, size, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "size": -1,
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- objectName := bucketName + "unique"
- args["objectName"] = objectName
-
- bufSize := dataFileMap["datafile-65-MB"]
- var reader = getDataReader("datafile-65-MB")
- defer reader.Close()
- args["size"] = bufSize
-
- // Upload an object.
- n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{})
-
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
- return
- }
- if n != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(n), err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put objects of unknown size.
-func testPutObjectsUnknownV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader,size,opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "size": "",
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Issues are revealed by trying to upload multiple files of unknown size
- // sequentially (on 4GB machines)
- for i := 1; i <= 4; i++ {
- // Simulate that we could be receiving byte slices of data that we want
- // to upload as a file
- rpipe, wpipe := io.Pipe()
- defer rpipe.Close()
- go func() {
- b := []byte("test")
- wpipe.Write(b)
- wpipe.Close()
- }()
-
- // Upload the object.
- objectName := fmt.Sprintf("%sunique%d", bucketName, i)
- args["objectName"] = objectName
-
- n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
- return
- }
- args["size"] = n
- if n != int64(4) {
- logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err)
- return
- }
-
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put object with 0 byte object.
-func testPutObject0ByteV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObject(bucketName, objectName, reader, size, opts)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "size": 0,
- "opts": "",
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- objectName := bucketName + "unique"
- args["objectName"] = objectName
- args["opts"] = minio.PutObjectOptions{}
-
- // Upload an object.
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
-
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
- return
- }
- if n != 0 {
- logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test expected error cases
-func testComposeObjectErrorCases() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- testComposeObjectErrorCasesWrapper(c)
-}
-
-// Test concatenating 10K objects
-func testCompose10KSources() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ComposeObject(destination, sourceList)"
- args := map[string]interface{}{}
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
- return
- }
-
- testComposeMultipleSources(c)
-}
-
-// Tests comprehensive list of all methods.
-func testFunctionalV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "testFunctionalV2()"
- functionAll := ""
- args := map[string]interface{}{}
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
- return
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- location := "us-east-1"
- // Make a new bucket.
- function = "MakeBucket(bucketName, location)"
- functionAll = "MakeBucket(bucketName, location)"
- args = map[string]interface{}{
- "bucketName": bucketName,
- "location": location,
- }
- err = c.MakeBucket(bucketName, location)
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- file, err := os.Create(fileName)
- if err != nil {
- logError(testName, function, args, startTime, "", "file create failed", err)
- return
- }
- for i := 0; i < 3; i++ {
- buf := make([]byte, rand.Intn(1<<19))
- _, err = file.Write(buf)
- if err != nil {
- logError(testName, function, args, startTime, "", "file write failed", err)
- return
- }
- }
- file.Close()
-
- // Verify if bucket exits and you have access.
- var exists bool
- function = "BucketExists(bucketName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- }
- exists, err = c.BucketExists(bucketName)
- if err != nil {
- logError(testName, function, args, startTime, "", "BucketExists failed", err)
- return
- }
- if !exists {
- logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
- return
- }
-
- // Make the bucket 'public read/write'.
- function = "SetBucketPolicy(bucketName, bucketPolicy)"
- functionAll += ", " + function
-
- readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
-
- args = map[string]interface{}{
- "bucketName": bucketName,
- "bucketPolicy": readWritePolicy,
- }
- err = c.SetBucketPolicy(bucketName, readWritePolicy)
-
- if err != nil {
- logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
- return
- }
-
- // List all buckets.
- function = "ListBuckets()"
- functionAll += ", " + function
- args = nil
- buckets, err := c.ListBuckets()
- if len(buckets) == 0 {
- logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
- return
- }
- if err != nil {
- logError(testName, function, args, startTime, "", "ListBuckets failed", err)
- return
- }
-
- // Verify if previously created bucket is listed in list buckets.
- bucketFound := false
- for _, bucket := range buckets {
- if bucket.Name == bucketName {
- bucketFound = true
- }
- }
-
- // If bucket not found error out.
- if !bucketFound {
- logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err)
- return
- }
-
- objectName := bucketName + "unique"
-
- // Generate data
- buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
-
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "contentType": "",
- }
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
- if n != int64(len(buf)) {
- logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err)
- return
- }
-
- objectNameNoLength := objectName + "-nolength"
- args["objectName"] = objectNameNoLength
- n, err = c.PutObject(bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- if n != int64(len(buf)) {
- logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err)
- return
- }
-
- // Instantiate a done channel to close all listing.
- doneCh := make(chan struct{})
- defer close(doneCh)
-
- objFound := false
- isRecursive := true // Recursive is true.
- function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "isRecursive": isRecursive,
- }
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err)
- return
- }
-
- incompObjNotFound := true
- function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "isRecursive": isRecursive,
- }
- for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
- if objIncompl.Key != "" {
- incompObjNotFound = false
- break
- }
- }
- if !incompObjNotFound {
- logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
- return
- }
-
- function = "GetObject(bucketName, objectName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- }
- newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- newReadBytes, err := ioutil.ReadAll(newReader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- newReader.Close()
-
- if !bytes.Equal(newReadBytes, buf) {
- logError(testName, function, args, startTime, "", "Bytes mismatch", err)
- return
- }
-
- function = "FGetObject(bucketName, objectName, fileName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "fileName": fileName + "-f",
- }
- err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "FgetObject failed", err)
- return
- }
-
- // Generate presigned HEAD object url.
- function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "expires": 3600 * time.Second,
- }
- presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
- return
- }
- // Verify if presigned url works.
- resp, err := http.Head(presignedHeadURL.String())
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
- return
- }
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err)
- return
- }
- if resp.Header.Get("ETag") == "" {
- logError(testName, function, args, startTime, "", "Got empty ETag", err)
- return
- }
- resp.Body.Close()
-
- // Generate presigned GET object url.
- function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName,
- "expires": 3600 * time.Second,
- }
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
- return
- }
- // Verify if presigned url works.
- resp, err = http.Get(presignedGetURL.String())
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err)
- return
- }
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
- return
- }
- newPresignedBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- resp.Body.Close()
- if !bytes.Equal(newPresignedBytes, buf) {
- logError(testName, function, args, startTime, "", "Bytes mismatch", err)
- return
- }
-
- // Set request parameters.
- reqParams := make(url.Values)
- reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
- // Generate presigned GET object url.
- args["reqParams"] = reqParams
- presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
- return
- }
- // Verify if presigned url works.
- resp, err = http.Get(presignedGetURL.String())
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err)
- return
- }
- if resp.StatusCode != http.StatusOK {
- logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
- return
- }
- newPresignedBytes, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- logError(testName, function, args, startTime, "", "Bytes mismatch", err)
- return
- }
- // Verify content disposition.
- if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err)
- return
- }
-
- function = "PresignedPutObject(bucketName, objectName, expires)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName + "-presigned",
- "expires": 3600 * time.Second,
- }
- presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
- if err != nil {
- logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
- return
- }
-
- // Generate data more than 32K
- buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
-
- req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
- if err != nil {
- logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
- return
- }
- httpClient := &http.Client{
- // Setting a sensible time out of 30secs to wait for response
- // headers. Request is pro-actively cancelled after 30secs
- // with no response.
- Timeout: 30 * time.Second,
- Transport: http.DefaultTransport,
- }
- resp, err = httpClient.Do(req)
- if err != nil {
- logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
- return
- }
-
- function = "GetObject(bucketName, objectName)"
- functionAll += ", " + function
- args = map[string]interface{}{
- "bucketName": bucketName,
- "objectName": objectName + "-presigned",
- }
- newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObject failed", err)
- return
- }
-
- newReadBytes, err = ioutil.ReadAll(newReader)
- if err != nil {
- logError(testName, function, args, startTime, "", "ReadAll failed", err)
- return
- }
- newReader.Close()
-
- if !bytes.Equal(newReadBytes, buf) {
- logError(testName, function, args, startTime, "", "Bytes mismatch", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- if err = os.Remove(fileName); err != nil {
- logError(testName, function, args, startTime, "", "File remove failed", err)
- return
- }
- if err = os.Remove(fileName + "-f"); err != nil {
- logError(testName, function, args, startTime, "", "File removes failed", err)
- return
- }
- successLogger(testName, functionAll, args, startTime).Info()
-}
-
-// Test get object with GetObjectWithContext
-func testGetObjectWithContext() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObjectWithContext(ctx, bucketName, objectName)"
- args := map[string]interface{}{
- "ctx": "",
- "bucketName": "",
- "objectName": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
- args["ctx"] = ctx
- defer cancel()
-
- r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
- return
- }
-
- if _, err = r.Stat(); err == nil {
- logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
- return
- }
- r.Close()
-
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- args["ctx"] = ctx
- defer cancel()
-
- // Read the data back
- r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObjectWithContext failed", err)
- return
- }
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "object Stat call failed", err)
- return
- }
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err)
- return
- }
- if err := r.Close(); err != nil {
- logError(testName, function, args, startTime, "", "object Close() call failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test get object with FGetObjectWithContext
-func testFGetObjectWithContext() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)"
- args := map[string]interface{}{
- "ctx": "",
- "bucketName": "",
- "objectName": "",
- "fileName": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- bufSize := dataFileMap["datafile-1-MB"]
- var reader = getDataReader("datafile-1-MB")
- defer reader.Close()
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
- args["ctx"] = ctx
- defer cancel()
-
- fileName := "tempfile-context"
- args["fileName"] = fileName
- // Read the data back
- err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
- if err == nil {
- logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
- return
- }
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- defer cancel()
-
- // Read the data back
- err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err)
- return
- }
- if err = os.Remove(fileName + "-fcontext"); err != nil {
- logError(testName, function, args, startTime, "", "Remove file failed", err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test get object ACLs with GetObjectACL
-func testGetObjectACL() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObjectACL(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // skipping region functional tests for non s3 runs
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
- return
- }
-
- // Instantiate new minio client object.
- c, err := minio.NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- bufSize := dataFileMap["datafile-1-MB"]
- var reader = getDataReader("datafile-1-MB")
- defer reader.Close()
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- // Add meta data to add a canned acl
- metaData := map[string]string{
- "X-Amz-Acl": "public-read-write",
- }
-
- _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- // Read the data back
- objectInfo, getObjectACLErr := c.GetObjectACL(bucketName, objectName)
- if getObjectACLErr == nil {
- logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr)
- return
- }
-
- s, ok := objectInfo.Metadata["X-Amz-Acl"]
- if !ok {
- logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil)
- return
- }
-
- if len(s) != 1 {
- logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
- return
- }
-
- if s[0] != "public-read-write" {
- logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil)
- return
- }
-
- bufSize = dataFileMap["datafile-1-MB"]
- var reader2 = getDataReader("datafile-1-MB")
- defer reader2.Close()
- // Save the data
- objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- // Add meta data to add a canned acl
- metaData = map[string]string{
- "X-Amz-Grant-Read": "id=fooread@minio.go",
- "X-Amz-Grant-Write": "id=foowrite@minio.go",
- }
-
- _, err = c.PutObject(bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject failed", err)
- return
- }
-
- // Read the data back
- objectInfo, getObjectACLErr = c.GetObjectACL(bucketName, objectName)
- if getObjectACLErr == nil {
- logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr)
- return
- }
-
- if len(objectInfo.Metadata) != 3 {
- logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
- return
- }
-
- s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
- if !ok {
- logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil)
- return
- }
-
- if len(s) != 1 {
- logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
- return
- }
-
- if s[0] != "fooread@minio.go" {
- logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil)
- return
- }
-
- s, ok = objectInfo.Metadata["X-Amz-Grant-Write"]
- if !ok {
- logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil)
- return
- }
-
- if len(s) != 1 {
- logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
- return
- }
-
- if s[0] != "foowrite@minio.go" {
- logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-}
-
-// Test validates putObject with context to see if request cancellation is honored for V2.
-func testPutObjectWithContextV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)"
- args := map[string]interface{}{
- "ctx": "",
- "bucketName": "",
- "objectName": "",
- "size": "",
- "opts": "",
- }
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Make a new bucket.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
- defer c.RemoveBucket(bucketName)
- bufSize := dataFileMap["datatfile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
- args["objectName"] = objectName
-
- ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- args["ctx"] = ctx
- args["size"] = bufSize
- defer cancel()
-
- _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectWithContext with short timeout failed", err)
- return
- }
-
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- args["ctx"] = ctx
-
- defer cancel()
- reader = getDataReader("datafile-33-kB")
- defer reader.Close()
- _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test get object with GetObjectWithContext
-func testGetObjectWithContextV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "GetObjectWithContext(ctx, bucketName, objectName)"
- args := map[string]interface{}{
- "ctx": "",
- "bucketName": "",
- "objectName": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- return
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
- args["ctx"] = ctx
- defer cancel()
-
- r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
- return
- }
- if _, err = r.Stat(); err == nil {
- logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
- return
- }
- r.Close()
-
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- defer cancel()
-
- // Read the data back
- r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "GetObjectWithContext shouldn't fail on longer timeout", err)
- return
- }
-
- st, err := r.Stat()
- if err != nil {
- logError(testName, function, args, startTime, "", "object Stat call failed", err)
- return
- }
- if st.Size != int64(bufSize) {
- logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
- return
- }
- if err := r.Close(); err != nil {
- logError(testName, function, args, startTime, "", " object Close() call failed", err)
- return
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test get object with FGetObjectWithContext
-func testFGetObjectWithContextV2() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)"
- args := map[string]interface{}{
- "ctx": "",
- "bucketName": "",
- "objectName": "",
- "fileName": "",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
- return
- }
-
- bufSize := dataFileMap["datatfile-1-MB"]
- var reader = getDataReader("datafile-1-MB")
- defer reader.Close()
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- args["objectName"] = objectName
-
- _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject call failed", err)
- return
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
- args["ctx"] = ctx
- defer cancel()
-
- fileName := "tempfile-context"
- args["fileName"] = fileName
-
- // Read the data back
- err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
- if err == nil {
- logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
- return
- }
- ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
- defer cancel()
-
- // Read the data back
- err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
- if err != nil {
- logError(testName, function, args, startTime, "", "FGetObjectWithContext call shouldn't fail on long timeout", err)
- return
- }
-
- if err = os.Remove(fileName + "-fcontext"); err != nil {
- logError(testName, function, args, startTime, "", "Remove file failed", err)
- return
- }
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test list object v1 and V2 storage class fields
-func testListObjects() {
- // initialize logging params
- startTime := time.Now()
- testName := getFuncName()
- function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectPrefix": "",
- "recursive": "true",
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableHTTPS)),
- )
- if err != nil {
- logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
- return
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
- args["bucketName"] = bucketName
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- logError(testName, function, args, startTime, "", "MakeBucket failed", err)
- return
- }
-
- bufSize := dataFileMap["datafile-33-kB"]
- var reader = getDataReader("datafile-33-kB")
- defer reader.Close()
-
- // Save the data
- objectName1 := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
- _, err = c.PutObject(bucketName, objectName1, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "STANDARD"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject1 call failed", err)
- return
- }
-
- bufSize1 := dataFileMap["datafile-33-kB"]
- var reader1 = getDataReader("datafile-33-kB")
- defer reader1.Close()
- objectName2 := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
- _, err = c.PutObject(bucketName, objectName2, reader1, int64(bufSize1), minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: "REDUCED_REDUNDANCY"})
- if err != nil {
- logError(testName, function, args, startTime, "", "PutObject2 call failed", err)
- return
- }
-
- // Create a done channel to control 'ListObjects' go routine.
- doneCh := make(chan struct{})
- // Exit cleanly upon return.
- defer close(doneCh)
-
- // check for storage-class from ListObjects result
- for objInfo := range c.ListObjects(bucketName, "", true, doneCh) {
- if objInfo.Err != nil {
- logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err)
- return
- }
- if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" {
- // Ignored as Gateways (Azure/GCS etc) wont return storage class
- ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info()
- }
- if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" {
- // Ignored as Gateways (Azure/GCS etc) wont return storage class
- ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info()
- }
- }
-
- // check for storage-class from ListObjectsV2 result
- for objInfo := range c.ListObjectsV2(bucketName, "", true, doneCh) {
- if objInfo.Err != nil {
- logError(testName, function, args, startTime, "", "ListObjectsV2 failed unexpectedly", err)
- return
- }
- if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" {
- // Ignored as Gateways (Azure/GCS etc) wont return storage class
- ignoredLog(testName, function, args, startTime, "ListObjectsV2 doesn't return expected storage class").Info()
- }
- if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" {
- // Ignored as Gateways (Azure/GCS etc) wont return storage class
- ignoredLog(testName, function, args, startTime, "ListObjectsV2 doesn't return expected storage class").Info()
- }
- }
-
- // Delete all objects and buckets
- if err = cleanupBucket(bucketName, c); err != nil {
- logError(testName, function, args, startTime, "", "Cleanup failed", err)
- return
- }
-
- successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Convert string to bool and always return false if any error
-func mustParseBool(str string) bool {
- b, err := strconv.ParseBool(str)
- if err != nil {
- return false
- }
- return b
-}
-
-func main() {
- // Output to stdout instead of the default stderr
- log.SetOutput(os.Stdout)
- // create custom formatter
- mintFormatter := mintJSONFormatter{}
- // set custom formatter
- log.SetFormatter(&mintFormatter)
- // log Info or above -- success cases are Info level, failures are Fatal level
- log.SetLevel(log.InfoLevel)
-
- tls := mustParseBool(os.Getenv(enableHTTPS))
- kmsEnabled := mustParseBool(os.Getenv(enableKMS))
- // execute tests
- if isFullMode() {
- testMakeBucketErrorV2()
- testGetObjectClosedTwiceV2()
- testFPutObjectV2()
- testMakeBucketRegionsV2()
- testGetObjectReadSeekFunctionalV2()
- testGetObjectReadAtFunctionalV2()
- testCopyObjectV2()
- testFunctionalV2()
- testComposeObjectErrorCasesV2()
- testCompose10KSourcesV2()
- testUserMetadataCopyingV2()
- testPutObject0ByteV2()
- testPutObjectNoLengthV2()
- testPutObjectsUnknownV2()
- testGetObjectWithContextV2()
- testFPutObjectWithContextV2()
- testFGetObjectWithContextV2()
- testPutObjectWithContextV2()
- testMakeBucketError()
- testMakeBucketRegions()
- testPutObjectWithMetadata()
- testPutObjectReadAt()
- testPutObjectStreaming()
- testGetObjectSeekEnd()
- testGetObjectClosedTwice()
- testRemoveMultipleObjects()
- testFPutObjectMultipart()
- testFPutObject()
- testGetObjectReadSeekFunctional()
- testGetObjectReadAtFunctional()
- testPresignedPostPolicy()
- testCopyObject()
- testComposeObjectErrorCases()
- testCompose10KSources()
- testUserMetadataCopying()
- testBucketNotification()
- testFunctional()
- testGetObjectModified()
- testPutObjectUploadSeekedObject()
- testGetObjectWithContext()
- testFPutObjectWithContext()
- testFGetObjectWithContext()
-
- testGetObjectACL()
-
- testPutObjectWithContext()
- testStorageClassMetadataPutObject()
- testStorageClassInvalidMetadataPutObject()
- testStorageClassMetadataCopyObject()
- testPutObjectWithContentLanguage()
- testListObjects()
-
- // SSE-C tests will only work over TLS connection.
- if tls {
- testSSECEncryptionPutGet()
- testSSECEncryptionFPut()
- testSSECEncryptedGetObjectReadAtFunctional()
- testSSECEncryptedGetObjectReadSeekFunctional()
- testEncryptedCopyObjectV2()
- testEncryptedSSECToSSECCopyObject()
- testEncryptedSSECToUnencryptedCopyObject()
- testUnencryptedToSSECCopyObject()
- testUnencryptedToUnencryptedCopyObject()
- testEncryptedEmptyObject()
- testDecryptedCopyObject()
- testSSECEncryptedToSSECCopyObjectPart()
- testSSECEncryptedToUnencryptedCopyPart()
- testUnencryptedToSSECCopyObjectPart()
- testUnencryptedToUnencryptedCopyPart()
- if kmsEnabled {
- testSSES3EncryptionPutGet()
- testSSES3EncryptionFPut()
- testSSES3EncryptedGetObjectReadAtFunctional()
- testSSES3EncryptedGetObjectReadSeekFunctional()
- testEncryptedSSECToSSES3CopyObject()
- testEncryptedSSES3ToSSECCopyObject()
- testEncryptedSSES3ToSSES3CopyObject()
- testEncryptedSSES3ToUnencryptedCopyObject()
- testUnencryptedToSSES3CopyObject()
- testSSECEncryptedToSSES3CopyObjectPart()
- testUnencryptedToSSES3CopyObjectPart()
- testSSES3EncryptedToSSECCopyObjectPart()
- testSSES3EncryptedToUnencryptedCopyPart()
- testSSES3EncryptedToSSES3CopyObjectPart()
- }
- }
- } else {
- testFunctional()
- testFunctionalV2()
- }
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go
deleted file mode 100644
index a6c466ae8..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "go.opencensus.io/trace"
- "golang.org/x/net/context"
-
- "google.golang.org/grpc/stats"
-)
-
-// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and
-// traces. Use with gRPC clients only.
-type ClientHandler struct {
- // StartOptions allows configuring the StartOptions used to create new spans.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindClient
- // for spans started by this handler.
- StartOptions trace.StartOptions
-}
-
-// HandleConn exists to satisfy gRPC stats.Handler.
-func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
- // no-op
-}
-
-// TagConn exists to satisfy gRPC stats.Handler.
-func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- // no-op
- return ctx
-}
-
-// HandleRPC implements per-RPC tracing and stats instrumentation.
-func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- traceHandleRPC(ctx, rs)
- statsHandleRPC(ctx, rs)
-}
-
-// TagRPC implements per-RPC context management.
-func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- ctx = c.traceTagRPC(ctx, rti)
- ctx = c.statsTagRPC(ctx, rti)
- return ctx
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
deleted file mode 100644
index abe978b67..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following variables are measures are recorded by ClientHandler:
-var (
- ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
- ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
- ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
- ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
- ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
- ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
-)
-
-// Predefined views may be registered to collect data for the above measures.
-// As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are registered by
-// default.
-var (
- ClientSentBytesPerRPCView = &view.View{
- Measure: ClientSentBytesPerRPC,
- Name: "grpc.io/client/sent_bytes_per_rpc",
- Description: "Distribution of bytes sent per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ClientReceivedBytesPerRPCView = &view.View{
- Measure: ClientReceivedBytesPerRPC,
- Name: "grpc.io/client/received_bytes_per_rpc",
- Description: "Distribution of bytes received per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ClientRoundtripLatencyView = &view.View{
- Measure: ClientRoundtripLatency,
- Name: "grpc.io/client/roundtrip_latency",
- Description: "Distribution of round-trip latency, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMillisecondsDistribution,
- }
-
- ClientCompletedRPCsView = &view.View{
- Measure: ClientRoundtripLatency,
- Name: "grpc.io/client/completed_rpcs",
- Description: "Count of RPCs by method and status.",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- Aggregation: view.Count(),
- }
-
- ClientSentMessagesPerRPCView = &view.View{
- Measure: ClientSentMessagesPerRPC,
- Name: "grpc.io/client/sent_messages_per_rpc",
- Description: "Distribution of sent messages count per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ClientReceivedMessagesPerRPCView = &view.View{
- Measure: ClientReceivedMessagesPerRPC,
- Name: "grpc.io/client/received_messages_per_rpc",
- Description: "Distribution of received messages count per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ClientServerLatencyView = &view.View{
- Measure: ClientServerLatency,
- Name: "grpc.io/client/server_latency",
- Description: "Distribution of server latency as viewed by client, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMillisecondsDistribution,
- }
-)
-
-// DefaultClientViews are the default client views provided by this package.
-var DefaultClientViews = []*view.View{
- ClientSentBytesPerRPCView,
- ClientReceivedBytesPerRPCView,
- ClientRoundtripLatencyView,
- ClientCompletedRPCsView,
-}
-
-// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
-// TODO(acetechnologist): This is temporary and will need to be replaced by a
-// mechanism to load these defaults from a common repository/config shared by
-// all supported languages. Likely a serialized protobuf of these defaults.
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
deleted file mode 100644
index 303c607f6..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "time"
-
- "go.opencensus.io/tag"
- "golang.org/x/net/context"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the tag.Map populated by the application code, serializes
-// its tags into the GRPC metadata in order to be sent to the server.
-func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- startTime := time.Now()
- if info == nil {
- if grpclog.V(2) {
- grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName)
- }
- return ctx
- }
-
- d := &rpcData{
- startTime: startTime,
- method: info.FullMethodName,
- }
- ts := tag.FromContext(ctx)
- if ts != nil {
- encoded := tag.Encode(ts)
- ctx = stats.SetTags(ctx, encoded)
- }
-
- return context.WithValue(ctx, rpcDataKey, d)
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
deleted file mode 100644
index 1370323fb..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ocgrpc contains OpenCensus stats and trace
-// integrations for gRPC.
-//
-// Use ServerHandler for servers and ClientHandler for clients.
-package ocgrpc // import "go.opencensus.io/plugin/ocgrpc"
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go
deleted file mode 100644
index b67b3e2be..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "go.opencensus.io/trace"
- "golang.org/x/net/context"
-
- "google.golang.org/grpc/stats"
-)
-
-// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and
-// traces. Use with gRPC servers.
-//
-// When installed (see Example), tracing metadata is read from inbound RPCs
-// by default. If no tracing metadata is present, or if the tracing metadata is
-// present but the SpanContext isn't sampled, then a new trace may be started
-// (as determined by Sampler).
-type ServerHandler struct {
- // IsPublicEndpoint may be set to true to always start a new trace around
- // each RPC. Any SpanContext in the RPC metadata will be added as a linked
- // span instead of making it the parent of the span created around the
- // server RPC.
- //
- // Be aware that if you leave this false (the default) on a public-facing
- // server, callers will be able to send tracing metadata in gRPC headers
- // and trigger traces in your backend.
- IsPublicEndpoint bool
-
- // StartOptions to use for to spans started around RPCs handled by this server.
- //
- // These will apply even if there is tracing metadata already
- // present on the inbound RPC but the SpanContext is not sampled. This
- // ensures that each service has some opportunity to be traced. If you would
- // like to not add any additional traces for this gRPC service, set:
- //
- // StartOptions.Sampler = trace.ProbabilitySampler(0.0)
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindServer
- // for spans started by this handler.
- StartOptions trace.StartOptions
-}
-
-var _ stats.Handler = (*ServerHandler)(nil)
-
-// HandleConn exists to satisfy gRPC stats.Handler.
-func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
- // no-op
-}
-
-// TagConn exists to satisfy gRPC stats.Handler.
-func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- // no-op
- return ctx
-}
-
-// HandleRPC implements per-RPC tracing and stats instrumentation.
-func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- traceHandleRPC(ctx, rs)
- statsHandleRPC(ctx, rs)
-}
-
-// TagRPC implements per-RPC context management.
-func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- ctx = s.traceTagRPC(ctx, rti)
- ctx = s.statsTagRPC(ctx, rti)
- return ctx
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
deleted file mode 100644
index 609d9ed24..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following variables are measures are recorded by ServerHandler:
-var (
- ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
- ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
- ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
- ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes)
- ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
-)
-
-// TODO(acetechnologist): This is temporary and will need to be replaced by a
-// mechanism to load these defaults from a common repository/config shared by
-// all supported languages. Likely a serialized protobuf of these defaults.
-
-// Predefined views may be registered to collect data for the above measures.
-// As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are registered by
-// default.
-var (
- ServerReceivedBytesPerRPCView = &view.View{
- Name: "grpc.io/server/received_bytes_per_rpc",
- Description: "Distribution of received bytes per RPC, by method.",
- Measure: ServerReceivedBytesPerRPC,
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ServerSentBytesPerRPCView = &view.View{
- Name: "grpc.io/server/sent_bytes_per_rpc",
- Description: "Distribution of total sent bytes per RPC, by method.",
- Measure: ServerSentBytesPerRPC,
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ServerLatencyView = &view.View{
- Name: "grpc.io/server/server_latency",
- Description: "Distribution of server latency in milliseconds, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerLatency,
- Aggregation: DefaultMillisecondsDistribution,
- }
-
- ServerCompletedRPCsView = &view.View{
- Name: "grpc.io/server/completed_rpcs",
- Description: "Count of RPCs by method and status.",
- TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus},
- Measure: ServerLatency,
- Aggregation: view.Count(),
- }
-
- ServerReceivedMessagesPerRPCView = &view.View{
- Name: "grpc.io/server/received_messages_per_rpc",
- Description: "Distribution of messages received count per RPC, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerReceivedMessagesPerRPC,
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ServerSentMessagesPerRPCView = &view.View{
- Name: "grpc.io/server/sent_messages_per_rpc",
- Description: "Distribution of messages sent count per RPC, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerSentMessagesPerRPC,
- Aggregation: DefaultMessageCountDistribution,
- }
-)
-
-// DefaultServerViews are the default server views provided by this package.
-var DefaultServerViews = []*view.View{
- ServerReceivedBytesPerRPCView,
- ServerSentBytesPerRPCView,
- ServerLatencyView,
- ServerCompletedRPCsView,
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
deleted file mode 100644
index 7847c1a91..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "time"
-
- "golang.org/x/net/context"
-
- "go.opencensus.io/tag"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
-// it and creates a new tag.Map and puts them into the returned context.
-func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- startTime := time.Now()
- if info == nil {
- if grpclog.V(2) {
- grpclog.Infof("opencensus: TagRPC called with nil info.")
- }
- return ctx
- }
- d := &rpcData{
- startTime: startTime,
- method: info.FullMethodName,
- }
- propagated := h.extractPropagatedTags(ctx)
- ctx = tag.NewContext(ctx, propagated)
- ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
- return context.WithValue(ctx, rpcDataKey, d)
-}
-
-// extractPropagatedTags creates a new tag map containing the tags extracted from the
-// gRPC metadata.
-func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
- buf := stats.Tags(ctx)
- if buf == nil {
- return nil
- }
- propagated, err := tag.Decode(buf)
- if err != nil {
- if grpclog.V(2) {
- grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
- }
- return nil
- }
- return propagated
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
deleted file mode 100644
index e9991fe0f..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "context"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
-
- ocstats "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-type grpcInstrumentationKey string
-
-// rpcData holds the instrumentation RPC data that is needed between the start
-// and end of an call. It holds the info that this package needs to keep track
-// of between the various GRPC events.
-type rpcData struct {
- // reqCount and respCount has to be the first words
- // in order to be 64-aligned on 32-bit architectures.
- sentCount, sentBytes, recvCount, recvBytes int64 // access atomically
-
- // startTime represents the time at which TagRPC was invoked at the
- // beginning of an RPC. It is an appoximation of the time when the
- // application code invoked GRPC code.
- startTime time.Time
- method string
-}
-
-// The following variables define the default hard-coded auxiliary data used by
-// both the default GRPC client and GRPC server metrics.
-var (
- DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
- DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
-)
-
-// Server tags are applied to the context used to process each RPC, as well as
-// the measures at the end of each RPC.
-var (
- KeyServerMethod, _ = tag.NewKey("grpc_server_method")
- KeyServerStatus, _ = tag.NewKey("grpc_server_status")
-)
-
-// Client tags are applied to measures at the end of each RPC.
-var (
- KeyClientMethod, _ = tag.NewKey("grpc_client_method")
- KeyClientStatus, _ = tag.NewKey("grpc_client_status")
-)
-
-var (
- rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
-)
-
-func methodName(fullname string) string {
- return strings.TrimLeft(fullname, "/")
-}
-
-// statsHandleRPC processes the RPC events.
-func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
- switch st := s.(type) {
- case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
- // do nothing for client
- case *stats.OutPayload:
- handleRPCOutPayload(ctx, st)
- case *stats.InPayload:
- handleRPCInPayload(ctx, st)
- case *stats.End:
- handleRPCEnd(ctx, st)
- default:
- grpclog.Infof("unexpected stats: %T", st)
- }
-}
-
-func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- atomic.AddInt64(&d.sentBytes, int64(s.Length))
- atomic.AddInt64(&d.sentCount, 1)
-}
-
-func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- atomic.AddInt64(&d.recvBytes, int64(s.Length))
- atomic.AddInt64(&d.recvCount, 1)
-}
-
-func handleRPCEnd(ctx context.Context, s *stats.End) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- elapsedTime := time.Since(d.startTime)
-
- var st string
- if s.Error != nil {
- s, ok := status.FromError(s.Error)
- if ok {
- st = statusCodeToString(s)
- }
- } else {
- st = "OK"
- }
-
- latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
- if s.Client {
- ocstats.RecordWithTags(ctx,
- []tag.Mutator{
- tag.Upsert(KeyClientMethod, methodName(d.method)),
- tag.Upsert(KeyClientStatus, st),
- },
- ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
- ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
- ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
- ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
- ClientRoundtripLatency.M(latencyMillis))
- } else {
- ocstats.RecordWithTags(ctx,
- []tag.Mutator{
- tag.Upsert(KeyServerStatus, st),
- },
- ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
- ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
- ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
- ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
- ServerLatency.M(latencyMillis))
- }
-}
-
-func statusCodeToString(s *status.Status) string {
- // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
- switch c := s.Code(); c {
- case codes.OK:
- return "OK"
- case codes.Canceled:
- return "CANCELLED"
- case codes.Unknown:
- return "UNKNOWN"
- case codes.InvalidArgument:
- return "INVALID_ARGUMENT"
- case codes.DeadlineExceeded:
- return "DEADLINE_EXCEEDED"
- case codes.NotFound:
- return "NOT_FOUND"
- case codes.AlreadyExists:
- return "ALREADY_EXISTS"
- case codes.PermissionDenied:
- return "PERMISSION_DENIED"
- case codes.ResourceExhausted:
- return "RESOURCE_EXHAUSTED"
- case codes.FailedPrecondition:
- return "FAILED_PRECONDITION"
- case codes.Aborted:
- return "ABORTED"
- case codes.OutOfRange:
- return "OUT_OF_RANGE"
- case codes.Unimplemented:
- return "UNIMPLEMENTED"
- case codes.Internal:
- return "INTERNAL"
- case codes.Unavailable:
- return "UNAVAILABLE"
- case codes.DataLoss:
- return "DATA_LOSS"
- case codes.Unauthenticated:
- return "UNAUTHENTICATED"
- default:
- return "CODE_" + strconv.FormatInt(int64(c), 10)
- }
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
deleted file mode 100644
index 720f381c2..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "strings"
-
- "google.golang.org/grpc/codes"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
- "golang.org/x/net/context"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-const traceContextKey = "grpc-trace-bin"
-
-// TagRPC creates a new trace span for the client side of the RPC.
-//
-// It returns ctx with the new trace span added and a serialization of the
-// SpanContext added to the outgoing gRPC metadata.
-func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- name := strings.TrimPrefix(rti.FullMethodName, "/")
- name = strings.Replace(name, "/", ".", -1)
- ctx, span := trace.StartSpan(ctx, name,
- trace.WithSampler(c.StartOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC
- traceContextBinary := propagation.Binary(span.SpanContext())
- return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary))
-}
-
-// TagRPC creates a new trace span for the server side of the RPC.
-//
-// It checks the incoming gRPC metadata in ctx for a SpanContext, and if
-// it finds one, uses that SpanContext as the parent context of the new span.
-//
-// It returns ctx, with the new trace span added.
-func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- md, _ := metadata.FromIncomingContext(ctx)
- name := strings.TrimPrefix(rti.FullMethodName, "/")
- name = strings.Replace(name, "/", ".", -1)
- traceContext := md[traceContextKey]
- var (
- parent trace.SpanContext
- haveParent bool
- )
- if len(traceContext) > 0 {
- // Metadata with keys ending in -bin are actually binary. They are base64
- // encoded before being put on the wire, see:
- // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata
- traceContextBinary := []byte(traceContext[0])
- parent, haveParent = propagation.FromBinary(traceContextBinary)
- if haveParent && !s.IsPublicEndpoint {
- ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithSampler(s.StartOptions.Sampler),
- )
- return ctx
- }
- }
- ctx, span := trace.StartSpan(ctx, name,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithSampler(s.StartOptions.Sampler))
- if haveParent {
- span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild})
- }
- return ctx
-}
-
-func traceHandleRPC(ctx context.Context, rs stats.RPCStats) {
- span := trace.FromContext(ctx)
- // TODO: compressed and uncompressed sizes are not populated in every message.
- switch rs := rs.(type) {
- case *stats.Begin:
- span.AddAttributes(
- trace.BoolAttribute("Client", rs.Client),
- trace.BoolAttribute("FailFast", rs.FailFast))
- case *stats.InPayload:
- span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength))
- case *stats.OutPayload:
- span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength))
- case *stats.End:
- if rs.Error != nil {
- s, ok := status.FromError(rs.Error)
- if ok {
- span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()})
- } else {
- span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()})
- }
- }
- span.End()
- }
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go
deleted file mode 100644
index 65ab1e996..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tracecontext contains HTTP propagator for TraceContext standard.
-// See https://github.com/w3c/distributed-tracing for more information.
-package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
-
-import (
- "encoding/hex"
- "fmt"
- "net/http"
- "net/textproto"
- "regexp"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
- "go.opencensus.io/trace/tracestate"
-)
-
-const (
- supportedVersion = 0
- maxVersion = 254
- maxTracestateLen = 512
- traceparentHeader = "traceparent"
- tracestateHeader = "tracestate"
- trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$`
-)
-
-var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt)
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// HTTPFormat implements the TraceContext trace propagation format.
-type HTTPFormat struct{}
-
-// SpanContextFromRequest extracts a span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- h, ok := getRequestHeader(req, traceparentHeader, false)
- if !ok {
- return trace.SpanContext{}, false
- }
- sections := strings.Split(h, "-")
- if len(sections) < 4 {
- return trace.SpanContext{}, false
- }
-
- if len(sections[0]) != 2 {
- return trace.SpanContext{}, false
- }
- ver, err := hex.DecodeString(sections[0])
- if err != nil {
- return trace.SpanContext{}, false
- }
- version := int(ver[0])
- if version > maxVersion {
- return trace.SpanContext{}, false
- }
-
- if version == 0 && len(sections) != 4 {
- return trace.SpanContext{}, false
- }
-
- if len(sections[1]) != 32 {
- return trace.SpanContext{}, false
- }
- tid, err := hex.DecodeString(sections[1])
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], tid)
-
- if len(sections[2]) != 16 {
- return trace.SpanContext{}, false
- }
- sid, err := hex.DecodeString(sections[2])
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.SpanID[:], sid)
-
- opts, err := hex.DecodeString(sections[3])
- if err != nil || len(opts) < 1 {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(opts[0])
-
- // Don't allow all zero trace or span ID.
- if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} {
- return trace.SpanContext{}, false
- }
-
- sc.Tracestate = tracestateFromRequest(req)
- return sc, true
-}
-
-// getRequestHeader returns a combined header field according to RFC7230 section 3.2.2.
-// If commaSeparated is true, multiple header fields with the same field name using be
-// combined using ",".
-// If no header was found using the given name, "ok" would be false.
-// If more than one headers was found using the given name, while commaSeparated is false,
-// "ok" would be false.
-func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) {
- v := req.Header[textproto.CanonicalMIMEHeaderKey(name)]
- switch len(v) {
- case 0:
- return "", false
- case 1:
- return v[0], true
- default:
- return strings.Join(v, ","), commaSeparated
- }
-}
-
-// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error.
-// Revisit to return additional boolean value to indicate parsing error when following issues
-// are resolved.
-// https://github.com/w3c/distributed-tracing/issues/172
-// https://github.com/w3c/distributed-tracing/issues/175
-func tracestateFromRequest(req *http.Request) *tracestate.Tracestate {
- h, _ := getRequestHeader(req, tracestateHeader, true)
- if h == "" {
- return nil
- }
-
- var entries []tracestate.Entry
- pairs := strings.Split(h, ",")
- hdrLenWithoutOWS := len(pairs) - 1 // Number of commas
- for _, pair := range pairs {
- matches := trimOWSRegExp.FindStringSubmatch(pair)
- if matches == nil {
- return nil
- }
- pair = matches[1]
- hdrLenWithoutOWS += len(pair)
- if hdrLenWithoutOWS > maxTracestateLen {
- return nil
- }
- kv := strings.Split(pair, "=")
- if len(kv) != 2 {
- return nil
- }
- entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]})
- }
- ts, err := tracestate.New(nil, entries...)
- if err != nil {
- return nil
- }
-
- return ts
-}
-
-func tracestateToRequest(sc trace.SpanContext, req *http.Request) {
- var pairs = make([]string, 0, len(sc.Tracestate.Entries()))
- if sc.Tracestate != nil {
- for _, entry := range sc.Tracestate.Entries() {
- pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "="))
- }
- h := strings.Join(pairs, ",")
-
- if h != "" && len(h) <= maxTracestateLen {
- req.Header.Set(tracestateHeader, h)
- }
- }
-}
-
-// SpanContextToRequest modifies the given request to include traceparent and tracestate headers.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- h := fmt.Sprintf("%x-%x-%x-%x",
- []byte{supportedVersion},
- sc.TraceID[:],
- sc.SpanID[:],
- []byte{byte(sc.TraceOptions)})
- req.Header.Set(traceparentHeader, h)
- tracestateToRequest(sc, req)
-}
diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go
deleted file mode 100644
index 372ffbb24..000000000
--- a/vendor/golang.org/x/net/publicsuffix/gen.go
+++ /dev/null
@@ -1,717 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates table.go and table_test.go based on the authoritative
-// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat
-//
-// The version is derived from
-// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat
-// and a human-readable form is at
-// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat
-//
-// To fetch a particular git revision, such as 5c70ccd250, pass
-// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat"
-// and -version "an explicit version string".
-
-import (
- "bufio"
- "bytes"
- "flag"
- "fmt"
- "go/format"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "regexp"
- "sort"
- "strings"
-
- "golang.org/x/net/idna"
-)
-
-const (
- // These sum of these four values must be no greater than 32.
- nodesBitsChildren = 10
- nodesBitsICANN = 1
- nodesBitsTextOffset = 15
- nodesBitsTextLength = 6
-
- // These sum of these four values must be no greater than 32.
- childrenBitsWildcard = 1
- childrenBitsNodeType = 2
- childrenBitsHi = 14
- childrenBitsLo = 14
-)
-
-var (
- maxChildren int
- maxTextOffset int
- maxTextLength int
- maxHi uint32
- maxLo uint32
-)
-
-func max(a, b int) int {
- if a < b {
- return b
- }
- return a
-}
-
-func u32max(a, b uint32) uint32 {
- if a < b {
- return b
- }
- return a
-}
-
-const (
- nodeTypeNormal = 0
- nodeTypeException = 1
- nodeTypeParentOnly = 2
- numNodeType = 3
-)
-
-func nodeTypeStr(n int) string {
- switch n {
- case nodeTypeNormal:
- return "+"
- case nodeTypeException:
- return "!"
- case nodeTypeParentOnly:
- return "o"
- }
- panic("unreachable")
-}
-
-const (
- defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat"
- gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat"
-)
-
-var (
- labelEncoding = map[string]uint32{}
- labelsList = []string{}
- labelsMap = map[string]bool{}
- rules = []string{}
- numICANNRules = 0
-
- // validSuffixRE is used to check that the entries in the public suffix
- // list are in canonical form (after Punycode encoding). Specifically,
- // capital letters are not allowed.
- validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`)
-
- shaRE = regexp.MustCompile(`"sha":"([^"]+)"`)
- dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`)
-
- comments = flag.Bool("comments", false, "generate table.go comments, for debugging")
- subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging")
- url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead")
- v = flag.Bool("v", false, "verbose output (to stderr)")
- version = flag.String("version", "", "the effective_tld_names.dat version")
-)
-
-func main() {
- if err := main1(); err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-}
-
-func main1() error {
- flag.Parse()
- if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 {
- return fmt.Errorf("not enough bits to encode the nodes table")
- }
- if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 {
- return fmt.Errorf("not enough bits to encode the children table")
- }
- if *version == "" {
- if *url != defaultURL {
- return fmt.Errorf("-version was not specified, and the -url is not the default one")
- }
- sha, date, err := gitCommit()
- if err != nil {
- return err
- }
- *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date)
- }
- var r io.Reader = os.Stdin
- if *url != "" {
- res, err := http.Get(*url)
- if err != nil {
- return err
- }
- if res.StatusCode != http.StatusOK {
- return fmt.Errorf("bad GET status for %s: %d", *url, res.Status)
- }
- r = res.Body
- defer res.Body.Close()
- }
-
- var root node
- icann := false
- br := bufio.NewReader(r)
- for {
- s, err := br.ReadString('\n')
- if err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
- s = strings.TrimSpace(s)
- if strings.Contains(s, "BEGIN ICANN DOMAINS") {
- if len(rules) != 0 {
- return fmt.Errorf(`expected no rules before "BEGIN ICANN DOMAINS"`)
- }
- icann = true
- continue
- }
- if strings.Contains(s, "END ICANN DOMAINS") {
- icann, numICANNRules = false, len(rules)
- continue
- }
- if s == "" || strings.HasPrefix(s, "//") {
- continue
- }
- s, err = idna.ToASCII(s)
- if err != nil {
- return err
- }
- if !validSuffixRE.MatchString(s) {
- return fmt.Errorf("bad publicsuffix.org list data: %q", s)
- }
-
- if *subset {
- switch {
- case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"):
- case s == "ak.us" || strings.HasSuffix(s, ".ak.us"):
- case s == "ao" || strings.HasSuffix(s, ".ao"):
- case s == "ar" || strings.HasSuffix(s, ".ar"):
- case s == "arpa" || strings.HasSuffix(s, ".arpa"):
- case s == "cy" || strings.HasSuffix(s, ".cy"):
- case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"):
- case s == "jp":
- case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"):
- case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"):
- case s == "om" || strings.HasSuffix(s, ".om"):
- case s == "uk" || strings.HasSuffix(s, ".uk"):
- case s == "uk.com" || strings.HasSuffix(s, ".uk.com"):
- case s == "tw" || strings.HasSuffix(s, ".tw"):
- case s == "zw" || strings.HasSuffix(s, ".zw"):
- case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"):
- // xn--p1ai is Russian-Cyrillic "рф".
- default:
- continue
- }
- }
-
- rules = append(rules, s)
-
- nt, wildcard := nodeTypeNormal, false
- switch {
- case strings.HasPrefix(s, "*."):
- s, nt = s[2:], nodeTypeParentOnly
- wildcard = true
- case strings.HasPrefix(s, "!"):
- s, nt = s[1:], nodeTypeException
- }
- labels := strings.Split(s, ".")
- for n, i := &root, len(labels)-1; i >= 0; i-- {
- label := labels[i]
- n = n.child(label)
- if i == 0 {
- if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly {
- n.nodeType = nt
- }
- n.icann = n.icann && icann
- n.wildcard = n.wildcard || wildcard
- }
- labelsMap[label] = true
- }
- }
- labelsList = make([]string, 0, len(labelsMap))
- for label := range labelsMap {
- labelsList = append(labelsList, label)
- }
- sort.Strings(labelsList)
-
- if err := generate(printReal, &root, "table.go"); err != nil {
- return err
- }
- if err := generate(printTest, &root, "table_test.go"); err != nil {
- return err
- }
- return nil
-}
-
-func generate(p func(io.Writer, *node) error, root *node, filename string) error {
- buf := new(bytes.Buffer)
- if err := p(buf, root); err != nil {
- return err
- }
- b, err := format.Source(buf.Bytes())
- if err != nil {
- return err
- }
- return ioutil.WriteFile(filename, b, 0644)
-}
-
-func gitCommit() (sha, date string, retErr error) {
- res, err := http.Get(gitCommitURL)
- if err != nil {
- return "", "", err
- }
- if res.StatusCode != http.StatusOK {
- return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status)
- }
- defer res.Body.Close()
- b, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return "", "", err
- }
- if m := shaRE.FindSubmatch(b); m != nil {
- sha = string(m[1])
- }
- if m := dateRE.FindSubmatch(b); m != nil {
- date = string(m[1])
- }
- if sha == "" || date == "" {
- retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL)
- }
- return sha, date, retErr
-}
-
-func printTest(w io.Writer, n *node) error {
- fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n")
- fmt.Fprintf(w, "package publicsuffix\n\nconst numICANNRules = %d\n\nvar rules = [...]string{\n", numICANNRules)
- for _, rule := range rules {
- fmt.Fprintf(w, "%q,\n", rule)
- }
- fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n")
- if err := n.walk(w, printNodeLabel); err != nil {
- return err
- }
- fmt.Fprintf(w, "}\n")
- return nil
-}
-
-func printReal(w io.Writer, n *node) error {
- const header = `// generated by go run gen.go; DO NOT EDIT
-
-package publicsuffix
-
-const version = %q
-
-const (
- nodesBitsChildren = %d
- nodesBitsICANN = %d
- nodesBitsTextOffset = %d
- nodesBitsTextLength = %d
-
- childrenBitsWildcard = %d
- childrenBitsNodeType = %d
- childrenBitsHi = %d
- childrenBitsLo = %d
-)
-
-const (
- nodeTypeNormal = %d
- nodeTypeException = %d
- nodeTypeParentOnly = %d
-)
-
-// numTLD is the number of top level domains.
-const numTLD = %d
-
-`
- fmt.Fprintf(w, header, *version,
- nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength,
- childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo,
- nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children))
-
- text := combineText(labelsList)
- if text == "" {
- return fmt.Errorf("internal error: makeText returned no text")
- }
- for _, label := range labelsList {
- offset, length := strings.Index(text, label), len(label)
- if offset < 0 {
- return fmt.Errorf("internal error: could not find %q in text %q", label, text)
- }
- maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length)
- if offset >= 1<= 1< 64 {
- n, plus = 64, " +"
- }
- fmt.Fprintf(w, "%q%s\n", text[:n], plus)
- text = text[n:]
- }
-
- if err := n.walk(w, assignIndexes); err != nil {
- return err
- }
-
- fmt.Fprintf(w, `
-
-// nodes is the list of nodes. Each node is represented as a uint32, which
-// encodes the node's children, wildcard bit and node type (as an index into
-// the children array), ICANN bit and text.
-//
-// If the table was generated with the -comments flag, there is a //-comment
-// after each node's data. In it is the nodes-array indexes of the children,
-// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The
-// nodeType is printed as + for normal, ! for exception, and o for parent-only
-// nodes that have children but don't match a domain label in their own right.
-// An I denotes an ICANN domain.
-//
-// The layout within the uint32, from MSB to LSB, is:
-// [%2d bits] unused
-// [%2d bits] children index
-// [%2d bits] ICANN bit
-// [%2d bits] text index
-// [%2d bits] text length
-var nodes = [...]uint32{
-`,
- 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength,
- nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength)
- if err := n.walk(w, printNode); err != nil {
- return err
- }
- fmt.Fprintf(w, `}
-
-// children is the list of nodes' children, the parent's wildcard bit and the
-// parent's node type. If a node has no children then their children index
-// will be in the range [0, 6), depending on the wildcard bit and node type.
-//
-// The layout within the uint32, from MSB to LSB, is:
-// [%2d bits] unused
-// [%2d bits] wildcard bit
-// [%2d bits] node type
-// [%2d bits] high nodes index (exclusive) of children
-// [%2d bits] low nodes index (inclusive) of children
-var children=[...]uint32{
-`,
- 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo,
- childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo)
- for i, c := range childrenEncoding {
- s := "---------------"
- lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0
- if *comments {
- fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n",
- c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType))
- } else {
- fmt.Fprintf(w, "0x%x,\n", c)
- }
- }
- fmt.Fprintf(w, "}\n\n")
- fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" {
- ss = ss[1:]
- }
- return ss
-}
-
-// crush combines a list of strings, taking advantage of overlaps. It returns a
-// single string that contains each input string as a substring.
-func crush(ss []string) string {
- maxLabelLen := 0
- for _, s := range ss {
- if maxLabelLen < len(s) {
- maxLabelLen = len(s)
- }
- }
-
- for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- {
- prefixes := makePrefixMap(ss, prefixLen)
- for i, s := range ss {
- if len(s) <= prefixLen {
- continue
- }
- mergeLabel(ss, i, prefixLen, prefixes)
- }
- }
-
- return strings.Join(ss, "")
-}
-
-// mergeLabel merges the label at ss[i] with the first available matching label
-// in prefixMap, where the last "prefixLen" characters in ss[i] match the first
-// "prefixLen" characters in the matching label.
-// It will merge ss[i] repeatedly until no more matches are available.
-// All matching labels merged into ss[i] are replaced by "".
-func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) {
- s := ss[i]
- suffix := s[len(s)-prefixLen:]
- for _, j := range prefixes[suffix] {
- // Empty strings mean "already used." Also avoid merging with self.
- if ss[j] == "" || i == j {
- continue
- }
- if *v {
- fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n",
- prefixLen, i, j, ss[i], ss[j], suffix)
- }
- ss[i] += ss[j][prefixLen:]
- ss[j] = ""
- // ss[i] has a new suffix, so merge again if possible.
- // Note: we only have to merge again at the same prefix length. Shorter
- // prefix lengths will be handled in the next iteration of crush's for loop.
- // Can there be matches for longer prefix lengths, introduced by the merge?
- // I believe that any such matches would by necessity have been eliminated
- // during substring removal or merged at a higher prefix length. For
- // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde"
- // would yield "abcde", which could be merged with "bcdef." However, in
- // practice "cde" would already have been elimintated by removeSubstrings.
- mergeLabel(ss, i, prefixLen, prefixes)
- return
- }
-}
-
-// prefixMap maps from a prefix to a list of strings containing that prefix. The
-// list of strings is represented as indexes into a slice of strings stored
-// elsewhere.
-type prefixMap map[string][]int
-
-// makePrefixMap constructs a prefixMap from a slice of strings.
-func makePrefixMap(ss []string, prefixLen int) prefixMap {
- prefixes := make(prefixMap)
- for i, s := range ss {
- // We use < rather than <= because if a label matches on a prefix equal to
- // its full length, that's actually a substring match handled by
- // removeSubstrings.
- if prefixLen < len(s) {
- prefix := s[:prefixLen]
- prefixes[prefix] = append(prefixes[prefix], i)
- }
- }
-
- return prefixes
-}
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go
deleted file mode 100644
index 7f096fef0..000000000
--- a/vendor/golang.org/x/sync/semaphore/semaphore.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package semaphore provides a weighted semaphore implementation.
-package semaphore // import "golang.org/x/sync/semaphore"
-
-import (
- "container/list"
- "context"
- "sync"
-)
-
-type waiter struct {
- n int64
- ready chan<- struct{} // Closed when semaphore acquired.
-}
-
-// NewWeighted creates a new weighted semaphore with the given
-// maximum combined weight for concurrent access.
-func NewWeighted(n int64) *Weighted {
- w := &Weighted{size: n}
- return w
-}
-
-// Weighted provides a way to bound concurrent access to a resource.
-// The callers can request access with a given weight.
-type Weighted struct {
- size int64
- cur int64
- mu sync.Mutex
- waiters list.List
-}
-
-// Acquire acquires the semaphore with a weight of n, blocking until resources
-// are available or ctx is done. On success, returns nil. On failure, returns
-// ctx.Err() and leaves the semaphore unchanged.
-//
-// If ctx is already done, Acquire may still succeed without blocking.
-func (s *Weighted) Acquire(ctx context.Context, n int64) error {
- s.mu.Lock()
- if s.size-s.cur >= n && s.waiters.Len() == 0 {
- s.cur += n
- s.mu.Unlock()
- return nil
- }
-
- if n > s.size {
- // Don't make other Acquire calls block on one that's doomed to fail.
- s.mu.Unlock()
- <-ctx.Done()
- return ctx.Err()
- }
-
- ready := make(chan struct{})
- w := waiter{n: n, ready: ready}
- elem := s.waiters.PushBack(w)
- s.mu.Unlock()
-
- select {
- case <-ctx.Done():
- err := ctx.Err()
- s.mu.Lock()
- select {
- case <-ready:
- // Acquired the semaphore after we were canceled. Rather than trying to
- // fix up the queue, just pretend we didn't notice the cancelation.
- err = nil
- default:
- s.waiters.Remove(elem)
- }
- s.mu.Unlock()
- return err
-
- case <-ready:
- return nil
- }
-}
-
-// TryAcquire acquires the semaphore with a weight of n without blocking.
-// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
-func (s *Weighted) TryAcquire(n int64) bool {
- s.mu.Lock()
- success := s.size-s.cur >= n && s.waiters.Len() == 0
- if success {
- s.cur += n
- }
- s.mu.Unlock()
- return success
-}
-
-// Release releases the semaphore with a weight of n.
-func (s *Weighted) Release(n int64) {
- s.mu.Lock()
- s.cur -= n
- if s.cur < 0 {
- s.mu.Unlock()
- panic("semaphore: released more than held")
- }
- for {
- next := s.waiters.Front()
- if next == nil {
- break // No more waiters blocked.
- }
-
- w := next.Value.(waiter)
- if s.size-s.cur < w.n {
- // Not enough tokens for the next waiter. We could keep going (to try to
- // find a waiter with a smaller request), but under load that could cause
- // starvation for large requests; instead, we leave all remaining waiters
- // blocked.
- //
- // Consider a semaphore used as a read-write lock, with N tokens, N
- // readers, and one writer. Each reader can Acquire(1) to obtain a read
- // lock. The writer can Acquire(N) to obtain a write lock, excluding all
- // of the readers. If we allow the readers to jump ahead in the queue,
- // the writer will starve — there is always one token available for every
- // reader.
- break
- }
-
- s.cur += w.n
- s.waiters.Remove(next)
- close(w.ready)
- }
- s.mu.Unlock()
-}
diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go
deleted file mode 100644
index 4548b993d..000000000
--- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
-//This program must be run after mksyscall.go.
-package main
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "strings"
-)
-
-func main() {
- in1, err := ioutil.ReadFile("syscall_darwin.go")
- if err != nil {
- log.Fatalf("can't open syscall_darwin.go: %s", err)
- }
- arch := os.Args[1]
- in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
- if err != nil {
- log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
- }
- in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
- if err != nil {
- log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
- }
- in := string(in1) + string(in2) + string(in3)
-
- trampolines := map[string]bool{}
-
- var out bytes.Buffer
-
- fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
- fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
- fmt.Fprintf(&out, "\n")
- fmt.Fprintf(&out, "// +build go1.12\n")
- fmt.Fprintf(&out, "\n")
- fmt.Fprintf(&out, "#include \"textflag.h\"\n")
- for _, line := range strings.Split(in, "\n") {
- if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
- continue
- }
- fn := line[5 : len(line)-13]
- if !trampolines[fn] {
- trampolines[fn] = true
- fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
- fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
- }
- }
- err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644)
- if err != nil {
- log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err)
- }
-}
diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go
deleted file mode 100644
index 9feddd00c..000000000
--- a/vendor/golang.org/x/sys/unix/mkpost.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// mkpost processes the output of cgo -godefs to
-// modify the generated types. It is used to clean up
-// the sys API in an architecture specific manner.
-//
-// mkpost is run after cgo -godefs; see README.md.
-package main
-
-import (
- "bytes"
- "fmt"
- "go/format"
- "io/ioutil"
- "log"
- "os"
- "regexp"
-)
-
-func main() {
- // Get the OS and architecture (using GOARCH_TARGET if it exists)
- goos := os.Getenv("GOOS")
- goarch := os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
- // Check that we are using the Docker-based build system if we should be.
- if goos == "linux" {
- if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
- os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n")
- os.Stderr.WriteString("See README.md\n")
- os.Exit(1)
- }
- }
-
- b, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- log.Fatal(err)
- }
-
- // Intentionally export __val fields in Fsid and Sigset_t
- valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`)
- b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}"))
-
- // Intentionally export __fds_bits field in FdSet
- fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
- b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
-
- // If we have empty Ptrace structs, we should delete them. Only s390x emits
- // nonempty Ptrace structs.
- ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
- b = ptraceRexexp.ReplaceAll(b, nil)
-
- // Replace the control_regs union with a blank identifier for now.
- controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
- b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
-
- // Remove fields that are added by glibc
- // Note that this is unstable as the identifers are private.
- removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
- b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Convert [65]int8 to [65]byte in Utsname members to simplify
- // conversion to string; see golang.org/issue/20753
- convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
- b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
-
- // Convert [1024]int8 to [1024]byte in Ptmget members
- convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
- b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
-
- // Remove spare fields (e.g. in Statx_t)
- spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
- b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove cgo padding fields
- removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
- b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove padding, hidden, or unused fields
- removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
- b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove the first line of warning from cgo
- b = b[bytes.IndexByte(b, '\n')+1:]
- // Modify the command in the header to include:
- // mkpost, our own warning, and a build tag.
- replacement := fmt.Sprintf(`$1 | go run mkpost.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s,%s`, goarch, goos)
- cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
- b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
-
- // gofmt
- b, err = format.Source(b)
- if err != nil {
- log.Fatal(err)
- }
-
- os.Stdout.Write(b)
-}
diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go
deleted file mode 100644
index e4af9424e..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_darwin.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named errno.
-
-A line beginning with //sysnb is like //sys, except that the
-goroutine will not be suspended during the execution of the system
-call. This must only be used for system calls which can never
-block, as otherwise the system call could cause all goroutines to
-hang.
-*/
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- plan9 = flag.Bool("plan9", false, "plan9")
- openbsd = flag.Bool("openbsd", false, "openbsd")
- netbsd = flag.Bool("netbsd", false, "netbsd")
- dragonfly = flag.Bool("dragonfly", false, "dragonfly")
- arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
- tags = flag.String("tags", "", "build tags")
- filename = flag.String("output", "", "output file name (standard output if omitted)")
-)
-
-// cmdLine returns this programs's commandline arguments
-func cmdLine() string {
- return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- // Get the OS and architecture (using GOARCH_TARGET if it exists)
- goos := os.Getenv("GOOS")
- if goos == "" {
- fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
- os.Exit(1)
- }
- goarch := os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
-
- // Check that we are using the Docker-based build system if we should
- if goos == "linux" {
- if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
- fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
- fmt.Fprintf(os.Stderr, "See README.md\n")
- os.Exit(1)
- }
- }
-
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- libc := false
- if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") {
- libc = true
- }
- trampolines := map[string]bool{}
-
- text := ""
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, errno error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
-
- // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers.
- if goos == "darwin" && !libc && funct == "ClockGettime" {
- continue
- }
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
- // Go function header.
- outDecl := ""
- if len(out) > 0 {
- outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
- }
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
-
- // Check if err return available
- errvar := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- break
- }
- }
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\tvar _p%d *byte\n", n)
- text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\tvar _p%d *byte\n", n)
- text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass dummy pointer in that case.
- // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
- text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
- args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
- n++
- } else if p.Type == "int64" && (*openbsd || *netbsd) {
- args = append(args, "0")
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else if endianness == "little-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- } else if p.Type == "int64" && *dragonfly {
- if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
- args = append(args, "0")
- }
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else if endianness == "little-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
- if len(args)%2 == 1 && *arm {
- // arm abi specifies 64-bit argument uses
- // (even, odd) pair
- args = append(args, "0")
- }
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- }
-
- // Determine which form to use; pad args with zeros.
- asm := "Syscall"
- if nonblock != nil {
- if errvar == "" && goos == "linux" {
- asm = "RawSyscallNoError"
- } else {
- asm = "RawSyscall"
- }
- } else {
- if errvar == "" && goos == "linux" {
- asm = "SyscallNoError"
- }
- }
- if len(args) <= 3 {
- for len(args) < 3 {
- args = append(args, "0")
- }
- } else if len(args) <= 6 {
- asm += "6"
- for len(args) < 6 {
- args = append(args, "0")
- }
- } else if len(args) <= 9 {
- asm += "9"
- for len(args) < 9 {
- args = append(args, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
- }
-
- // System call number.
- if sysname == "" {
- sysname = "SYS_" + funct
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToUpper(sysname)
- }
-
- var libcFn string
- if libc {
- asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
- sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
- sysname = strings.ToLower(sysname) // lowercase
- if sysname == "getdirentries64" {
- // Special case - libSystem name and
- // raw syscall name don't match.
- sysname = "__getdirentries64"
- }
- libcFn = sysname
- sysname = "funcPC(libc_" + sysname + "_trampoline)"
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
-
- // Assign return values.
- body := ""
- ret := []string{"_", "_", "_"}
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" && !*plan9 {
- reg = "e1"
- ret[2] = reg
- doErrno = true
- } else if p.Name == "err" && *plan9 {
- ret[0] = "r0"
- ret[2] = "e1"
- break
- } else {
- reg = fmt.Sprintf("r%d", i)
- ret[i] = reg
- }
- if p.Type == "bool" {
- reg = fmt.Sprintf("%s != 0", reg)
- }
- if p.Type == "int64" && endianness != "" {
- // 64-bit number in r1:r0 or r0:r1.
- if i+2 > len(out) {
- fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
- }
- if endianness == "big-endian" {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
- } else {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
- }
- ret[i] = fmt.Sprintf("r%d", i)
- ret[i+1] = fmt.Sprintf("r%d", i+1)
- }
- if reg != "e1" || *plan9 {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
- text += fmt.Sprintf("\t%s\n", call)
- } else {
- if errvar == "" && goos == "linux" {
- // raw syscall without error on Linux, see golang.org/issue/22924
- text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
- } else {
- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
- }
- }
- text += body
-
- if *plan9 && ret[2] == "e1" {
- text += "\tif int32(r0) == -1 {\n"
- text += "\t\terr = e1\n"
- text += "\t}\n"
- } else if doErrno {
- text += "\tif e1 != 0 {\n"
- text += "\t\terr = errnoErr(e1)\n"
- text += "\t}\n"
- }
- text += "\treturn\n"
- text += "}\n\n"
-
- if libc && !trampolines[libcFn] {
- // some system calls share a trampoline, like read and readlen.
- trampolines[libcFn] = true
- // Declare assembly trampoline.
- text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
- // Assembly trampoline calls the libc_* function, which this magic
- // redirects to use the function from libSystem.
- text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn)
- text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
- text += "\n"
- }
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-%s
-`
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
deleted file mode 100644
index 3be3cdfc3..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_aix.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
- * If go func name needs to be different than its libc name,
- * or the function is not in libc, name could be specified
- * at the end, after "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-*/
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- aix = flag.Bool("aix", false, "aix")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this programs's commandline arguments
-func cmdLine() string {
- return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- text := ""
- cExtern := "/*\n#include \n#include \n"
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
- // Check if value return, err return available
- errvar := ""
- retvar := ""
- rettype := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- } else {
- retvar = p.Name
- rettype = p.Type
- }
- }
-
- // System call name.
- if sysname == "" {
- sysname = funct
- }
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- cRettype := ""
- if rettype == "unsafe.Pointer" {
- cRettype = "uintptr_t"
- } else if rettype == "uintptr" {
- cRettype = "uintptr_t"
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
- cRettype = "uintptr_t"
- } else if rettype == "int" {
- cRettype = "int"
- } else if rettype == "int32" {
- cRettype = "int"
- } else if rettype == "int64" {
- cRettype = "long long"
- } else if rettype == "uint32" {
- cRettype = "unsigned int"
- } else if rettype == "uint64" {
- cRettype = "unsigned long long"
- } else {
- cRettype = "int"
- }
- if sysname == "exit" {
- cRettype = "void"
- }
-
- // Change p.Types to c
- var cIn []string
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "string" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t", "size_t")
- } else if p.Type == "unsafe.Pointer" {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "uintptr" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "int" {
- cIn = append(cIn, "int")
- } else if p.Type == "int32" {
- cIn = append(cIn, "int")
- } else if p.Type == "int64" {
- cIn = append(cIn, "long long")
- } else if p.Type == "uint32" {
- cIn = append(cIn, "unsigned int")
- } else if p.Type == "uint64" {
- cIn = append(cIn, "unsigned long long")
- } else {
- cIn = append(cIn, "int")
- }
- }
-
- if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- cExtern += "#define c_select select\n"
- }
- // Imports of system calls from libc
- cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
- cIn := strings.Join(cIn, ", ")
- cExtern += fmt.Sprintf("(%s);\n", cIn)
- }
-
- // So file name.
- if *aix {
- if modname == "" {
- modname = "libc.a/shr_64.o"
- } else {
- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
- os.Exit(1)
- }
- }
-
- strconvfunc := "C.CString"
-
- // Go function header.
- if outps != "" {
- outps = fmt.Sprintf(" (%s)", outps)
- }
- if text != "" {
- text += "\n"
- }
-
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- argN := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n))
- n++
- text += fmt.Sprintf("\tvar _p%d int\n", n)
- text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name)
- args = append(args, fmt.Sprintf("C.size_t(_p%d)", n))
- n++
- } else if p.Type == "int64" && endianness != "" {
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- n++
- } else if p.Type == "bool" {
- text += fmt.Sprintf("\tvar _p%d uint32\n", n)
- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
- args = append(args, fmt.Sprintf("_p%d", n))
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
- } else if p.Type == "unsafe.Pointer" {
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
- } else if p.Type == "int" {
- if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) {
- args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name))
- } else if argN == 0 && funct == "fcntl" {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- }
- } else if p.Type == "int32" {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- } else if p.Type == "int64" {
- args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name))
- } else if p.Type == "uint32" {
- args = append(args, fmt.Sprintf("C.uint(%s)", p.Name))
- } else if p.Type == "uint64" {
- args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name))
- } else if p.Type == "uintptr" {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- }
- argN++
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := ""
- if sysname == "exit" {
- if errvar != "" {
- call += "er :="
- } else {
- call += ""
- }
- } else if errvar != "" {
- call += "r0,er :="
- } else if retvar != "" {
- call += "r0,_ :="
- } else {
- call += ""
- }
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
- } else {
- call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
- }
-
- // Assign return values.
- body := ""
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- } else {
- reg = "r0"
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
-
- // verify return
- if sysname != "exit" && errvar != "" {
- if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil {
- body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- } else {
- body += "\tif (r0 ==-1 && er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- }
- } else if errvar != "" {
- body += "\tif (er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- }
-
- text += fmt.Sprintf("\t%s\n", call)
- text += body
-
- text += "\treturn\n"
- text += "}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-
-%s
-*/
-import "C"
-import (
- "unsafe"
-)
-
-
-%s
-
-%s
-`
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
deleted file mode 100644
index c96009951..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_aix.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
- * If go func name needs to be different than its libc name,
- * or the function is not in libc, name could be specified
- * at the end, after "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-
-
-This program will generate three files and handle both gc and gccgo implementation:
- - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation)
- - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6
- - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type.
-
- The generated code looks like this
-
-zsyscall_aix_ppc64.go
-func asyscall(...) (n int, err error) {
- // Pointer Creation
- r1, e1 := callasyscall(...)
- // Type Conversion
- // Error Handler
- return
-}
-
-zsyscall_aix_ppc64_gc.go
-//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
-//go:linkname libc_asyscall libc_asyscall
-var asyscall syscallFunc
-
-func callasyscall(...) (r1 uintptr, e1 Errno) {
- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
- return
-}
-
-zsyscall_aix_ppc64_ggcgo.go
-
-// int asyscall(...)
-
-import "C"
-
-func callasyscall(...) (r1 uintptr, e1 Errno) {
- r1 = uintptr(C.asyscall(...))
- e1 = syscall.GetErrno()
- return
-}
-*/
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- aix = flag.Bool("aix", false, "aix")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this programs's commandline arguments
-func cmdLine() string {
- return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- // GCCGO
- textgccgo := ""
- cExtern := "/*\n#include \n"
- // GC
- textgc := ""
- dynimports := ""
- linknames := ""
- var vars []string
- // COMMON
- textcommon := ""
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- if sysname == "" {
- sysname = funct
- }
-
- onlyCommon := false
- if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
- // This function call another syscall which is already implemented.
- // Therefore, the gc and gccgo part must not be generated.
- onlyCommon = true
- }
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
-
- textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- if !onlyCommon {
- textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- }
-
- // Check if value return, err return available
- errvar := ""
- rettype := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- } else {
- rettype = p.Type
- }
- }
-
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- // GCCGO Prototype return type
- cRettype := ""
- if rettype == "unsafe.Pointer" {
- cRettype = "uintptr_t"
- } else if rettype == "uintptr" {
- cRettype = "uintptr_t"
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
- cRettype = "uintptr_t"
- } else if rettype == "int" {
- cRettype = "int"
- } else if rettype == "int32" {
- cRettype = "int"
- } else if rettype == "int64" {
- cRettype = "long long"
- } else if rettype == "uint32" {
- cRettype = "unsigned int"
- } else if rettype == "uint64" {
- cRettype = "unsigned long long"
- } else {
- cRettype = "int"
- }
- if sysname == "exit" {
- cRettype = "void"
- }
-
- // GCCGO Prototype arguments type
- var cIn []string
- for i, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "string" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t", "size_t")
- } else if p.Type == "unsafe.Pointer" {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "uintptr" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "int" {
- if (i == 0 || i == 2) && funct == "fcntl" {
- // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock
- cIn = append(cIn, "uintptr_t")
- } else {
- cIn = append(cIn, "int")
- }
-
- } else if p.Type == "int32" {
- cIn = append(cIn, "int")
- } else if p.Type == "int64" {
- cIn = append(cIn, "long long")
- } else if p.Type == "uint32" {
- cIn = append(cIn, "unsigned int")
- } else if p.Type == "uint64" {
- cIn = append(cIn, "unsigned long long")
- } else {
- cIn = append(cIn, "int")
- }
- }
-
- if !onlyCommon {
- // GCCGO Prototype Generation
- // Imports of system calls from libc
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- cExtern += "#define c_select select\n"
- }
- cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
- cIn := strings.Join(cIn, ", ")
- cExtern += fmt.Sprintf("(%s);\n", cIn)
- }
- // GC Library name
- if modname == "" {
- modname = "libc.a/shr_64.o"
- } else {
- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
- os.Exit(1)
- }
- sysvarname := fmt.Sprintf("libc_%s", sysname)
-
- if !onlyCommon {
- // GC Runtime import of function to allow cross-platform builds.
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname)
- // GC Link symbol to proc address variable.
- linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname)
- // GC Library proc address variable.
- vars = append(vars, sysvarname)
- }
-
- strconvfunc := "BytePtrFromString"
- strconvtype := "*byte"
-
- // Go function header.
- if outps != "" {
- outps = fmt.Sprintf(" (%s)", outps)
- }
- if textcommon != "" {
- textcommon += "\n"
- }
-
- textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
-
- // Prepare arguments tocall.
- var argscommon []string // Arguments in the common part
- var argscall []string // Arguments for call prototype
- var argsgc []string // Arguments for gc call (with syscall6)
- var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall)
- n := 0
- argN := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if p.Type == "string" && errvar != "" {
- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
-
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
-
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
- textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n))
- n++
- } else if p.Type == "int64" && endianness != "" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n")
- } else if p.Type == "bool" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. Case not yet implemented\n")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" {
- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if p.Type == "int" {
- if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) {
- // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
-
- } else {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- }
- } else if p.Type == "int32" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int32", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- } else if p.Type == "int64" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int64", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name))
- } else if p.Type == "uint32" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name))
- } else if p.Type == "uint64" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name))
- } else if p.Type == "uintptr" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- }
- argN++
- }
- nargs := len(argsgc)
-
- // COMMON function generation
- argscommonlist := strings.Join(argscommon, ", ")
- callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist)
- ret := []string{"_", "_"}
- body := ""
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- ret[1] = reg
- doErrno = true
- } else {
- reg = "r0"
- ret[0] = reg
- }
- if p.Type == "bool" {
- reg = fmt.Sprintf("%s != 0", reg)
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" {
- textcommon += fmt.Sprintf("\t%s\n", callcommon)
- } else {
- textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon)
- }
- textcommon += body
-
- if doErrno {
- textcommon += "\tif e1 != 0 {\n"
- textcommon += "\t\terr = errnoErr(e1)\n"
- textcommon += "\t}\n"
- }
- textcommon += "\treturn\n"
- textcommon += "}\n"
-
- if onlyCommon {
- continue
- }
-
- // CALL Prototype
- callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", "))
-
- // GC function generation
- asm := "syscall6"
- if nonblock != nil {
- asm = "rawSyscall6"
- }
-
- if len(argsgc) <= 6 {
- for len(argsgc) < 6 {
- argsgc = append(argsgc, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct)
- os.Exit(1)
- }
- argsgclist := strings.Join(argsgc, ", ")
- callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist)
-
- textgc += callProto
- textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc)
- textgc += "\treturn\n}\n"
-
- // GCCGO function generation
- argsgccgolist := strings.Join(argsgccgo, ", ")
- var callgccgo string
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
- } else {
- callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
- }
- textgccgo += callProto
- textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
- textgccgo += "\te1 = syscall.GetErrno()\n"
- textgccgo += "\treturn\n}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
-
- // Print zsyscall_aix_ppc64.go
- err := ioutil.WriteFile("zsyscall_aix_ppc64.go",
- []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- // Print zsyscall_aix_ppc64_gc.go
- vardecls := "\t" + strings.Join(vars, ",\n\t")
- vardecls += " syscallFunc"
- err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go",
- []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- // Print zsyscall_aix_ppc64_gccgo.go
- err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go",
- []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-}
-
-const srcTemplate1 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-import (
- "unsafe"
-)
-
-
-%s
-
-%s
-`
-const srcTemplate2 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-// +build !gccgo
-
-package %s
-
-import (
- "unsafe"
-)
-%s
-%s
-%s
-type syscallFunc uintptr
-
-var (
-%s
-)
-
-// Implemented in runtime/syscall_aix.go.
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-
-%s
-`
-const srcTemplate3 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-// +build gccgo
-
-package %s
-
-%s
-*/
-import "C"
-import (
- "syscall"
-)
-
-
-%s
-
-%s
-`
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go
deleted file mode 100644
index 3d864738b..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
- This program reads a file containing function prototypes
- (like syscall_solaris.go) and generates system call bodies.
- The prototypes are marked by lines beginning with "//sys"
- and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
- * If go func name needs to be different than its libc name,
- * or the function is not in libc, name could be specified
- * at the end, after "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-*/
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this programs's commandline arguments
-func cmdLine() string {
- return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- text := ""
- dynimports := ""
- linknames := ""
- var vars []string
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
- // So file name.
- if modname == "" {
- modname = "libc"
- }
-
- // System call name.
- if sysname == "" {
- sysname = funct
- }
-
- // System call pointer variable name.
- sysvarname := fmt.Sprintf("proc%s", sysname)
-
- strconvfunc := "BytePtrFromString"
- strconvtype := "*byte"
-
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- // Runtime import of function to allow cross-platform builds.
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname)
- // Link symbol to proc address variable.
- linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname)
- // Library proc address variable.
- vars = append(vars, sysvarname)
-
- // Go function header.
- outlist := strings.Join(out, ", ")
- if outlist != "" {
- outlist = fmt.Sprintf(" (%s)", outlist)
- }
- if text != "" {
- text += "\n"
- }
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist)
-
- // Check if err return available
- errvar := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- continue
- }
- }
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
- n++
- } else if p.Type == "int64" && endianness != "" {
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- } else if p.Type == "bool" {
- text += fmt.Sprintf("\tvar _p%d uint32\n", n)
- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
- args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
- n++
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- }
- nargs := len(args)
-
- // Determine which form to use; pad args with zeros.
- asm := "sysvicall6"
- if nonblock != nil {
- asm = "rawSysvicall6"
- }
- if len(args) <= 6 {
- for len(args) < 6 {
- args = append(args, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
- os.Exit(1)
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
-
- // Assign return values.
- body := ""
- ret := []string{"_", "_", "_"}
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- ret[2] = reg
- doErrno = true
- } else {
- reg = fmt.Sprintf("r%d", i)
- ret[i] = reg
- }
- if p.Type == "bool" {
- reg = fmt.Sprintf("%d != 0", reg)
- }
- if p.Type == "int64" && endianness != "" {
- // 64-bit number in r1:r0 or r0:r1.
- if i+2 > len(out) {
- fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
- os.Exit(1)
- }
- if endianness == "big-endian" {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
- } else {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
- }
- ret[i] = fmt.Sprintf("r%d", i)
- ret[i+1] = fmt.Sprintf("r%d", i+1)
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
- text += fmt.Sprintf("\t%s\n", call)
- } else {
- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
- }
- text += body
-
- if doErrno {
- text += "\tif e1 != 0 {\n"
- text += "\t\terr = e1\n"
- text += "\t}\n"
- }
- text += "\treturn\n"
- text += "}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
- vardecls := "\t" + strings.Join(vars, ",\n\t")
- vardecls += " syscallFunc"
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-import (
- "syscall"
- "unsafe"
-)
-%s
-%s
-%s
-var (
-%s
-)
-
-%s
-`
diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go
deleted file mode 100644
index 07f8960ff..000000000
--- a/vendor/golang.org/x/sys/unix/mksysnum.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Generate system call table for DragonFly, NetBSD,
-// FreeBSD, OpenBSD or Darwin from master list
-// (for example, /usr/src/sys/kern/syscalls.master or
-// sys/syscall.h).
-package main
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- goos, goarch string
-)
-
-// cmdLine returns this programs's commandline arguments
-func cmdLine() string {
- return "go run mksysnum.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return fmt.Sprintf("%s,%s", goarch, goos)
-}
-
-func checkErr(err error) {
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-}
-
-// source string and substring slice for regexp
-type re struct {
- str string // source string
- sub []string // matched sub-string
-}
-
-// Match performs regular expression match
-func (r *re) Match(exp string) bool {
- r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str)
- if r.sub != nil {
- return true
- }
- return false
-}
-
-// fetchFile fetches a text file from URL
-func fetchFile(URL string) io.Reader {
- resp, err := http.Get(URL)
- checkErr(err)
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- checkErr(err)
- return strings.NewReader(string(body))
-}
-
-// readFile reads a text file from path
-func readFile(path string) io.Reader {
- file, err := os.Open(os.Args[1])
- checkErr(err)
- return file
-}
-
-func format(name, num, proto string) string {
- name = strings.ToUpper(name)
- // There are multiple entries for enosys and nosys, so comment them out.
- nm := re{str: name}
- if nm.Match(`^SYS_E?NOSYS$`) {
- name = fmt.Sprintf("// %s", name)
- }
- if name == `SYS_SYS_EXIT` {
- name = `SYS_EXIT`
- }
- return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
-}
-
-func main() {
- // Get the OS (using GOOS_TARGET if it exist)
- goos = os.Getenv("GOOS_TARGET")
- if goos == "" {
- goos = os.Getenv("GOOS")
- }
- // Get the architecture (using GOARCH_TARGET if it exists)
- goarch = os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
- // Check if GOOS and GOARCH environment variables are defined
- if goarch == "" || goos == "" {
- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
- os.Exit(1)
- }
-
- file := strings.TrimSpace(os.Args[1])
- var syscalls io.Reader
- if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") {
- // Download syscalls.master file
- syscalls = fetchFile(file)
- } else {
- syscalls = readFile(file)
- }
-
- var text, line string
- s := bufio.NewScanner(syscalls)
- for s.Scan() {
- t := re{str: line}
- if t.Match(`^(.*)\\$`) {
- // Handle continuation
- line = t.sub[1]
- line += strings.TrimLeft(s.Text(), " \t")
- } else {
- // New line
- line = s.Text()
- }
- t = re{str: line}
- if t.Match(`\\$`) {
- continue
- }
- t = re{str: line}
-
- switch goos {
- case "dragonfly":
- if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) {
- num, proto := t.sub[1], t.sub[2]
- name := fmt.Sprintf("SYS_%s", t.sub[3])
- text += format(name, num, proto)
- }
- case "freebsd":
- if t.Match(`^([0-9]+)\s+\S+\s+(?:NO)?STD\s+({ \S+\s+(\w+).*)$`) {
- num, proto := t.sub[1], t.sub[2]
- name := fmt.Sprintf("SYS_%s", t.sub[3])
- text += format(name, num, proto)
- }
- case "openbsd":
- if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) {
- num, proto, name := t.sub[1], t.sub[3], t.sub[4]
- text += format(name, num, proto)
- }
- case "netbsd":
- if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) {
- num, proto, compat := t.sub[1], t.sub[6], t.sub[8]
- name := t.sub[7] + "_" + t.sub[9]
- if t.sub[11] != "" {
- name = t.sub[7] + "_" + t.sub[11]
- }
- name = strings.ToUpper(name)
- if compat == "" || compat == "13" || compat == "30" || compat == "50" {
- text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
- }
- }
- case "darwin":
- if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) {
- name, num := t.sub[1], t.sub[2]
- name = strings.ToUpper(name)
- text += fmt.Sprintf(" SYS_%s = %s;\n", name, num)
- }
- default:
- fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos)
- os.Exit(1)
-
- }
- }
- err := s.Err()
- checkErr(err)
-
- fmt.Printf(template, cmdLine(), buildTags(), text)
-}
-
-const template = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package unix
-
-const(
-%s)`
diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go
deleted file mode 100644
index 25e834940..000000000
--- a/vendor/golang.org/x/sys/unix/types_aix.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-// +build aix
-
-/*
-Input to cgo -godefs. See also mkerrors.sh and mkall.sh
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-
-#include
-#include
-#include
-#include
-
-
-#include
-#include
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
- PathMax = C.PATH_MAX
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-type off64 C.off64_t
-type off C.off_t
-type Mode_t C.mode_t
-
-// Time
-
-type Timespec C.struct_timespec
-
-type StTimespec C.struct_st_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-type Timex C.struct_timex
-
-type Time_t C.time_t
-
-type Tms C.struct_tms
-
-type Utimbuf C.struct_utimbuf
-
-type Timezone C.struct_timezone
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit64
-
-type Pid_t C.pid_t
-
-type _Gid_t C.gid_t
-
-type dev_t C.dev_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type StatxTimestamp C.struct_statx_timestamp
-
-type Statx_t C.struct_statx
-
-type Dirent C.struct_dirent
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Cmsghdr C.struct_cmsghdr
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type Linger C.struct_linger
-
-type Msghdr C.struct_msghdr
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
-)
-
-type IfMsgHdr C.struct_if_msghdr
-
-// Misc
-
-type FdSet C.fd_set
-
-type Utsname C.struct_utsname
-
-type Ustat_t C.struct_ustat
-
-type Sigset_t C.sigset_t
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Termio C.struct_termio
-
-type Winsize C.struct_winsize
-
-//poll
-
-type PollFd struct {
- Fd int32
- Events uint16
- Revents uint16
-}
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-//flock_t
-
-type Flock_t C.struct_flock64
-
-// Statfs
-
-type Fsid_t C.struct_fsid_t
-type Fsid64_t C.struct_fsid64_t
-
-type Statfs_t C.struct_statfs
-
-const RNDGETENTCNT = 0x80045200
diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go
deleted file mode 100644
index 155c2e692..000000000
--- a/vendor/golang.org/x/sys/unix/types_darwin.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define __DARWIN_UNIX03 0
-#define KERNEL
-#define _DARWIN_USE_64_BIT_INODE
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat64
-
-type Statfs_t C.struct_statfs64
-
-type Flock_t C.struct_flock
-
-type Fstore_t C.struct_fstore
-
-type Radvisory_t C.struct_radvisory
-
-type Fbootstraptransfer_t C.struct_fbootstraptransfer
-
-type Log2phys_t C.struct_log2phys
-
-type Fsid C.struct_fsid
-
-type Dirent C.struct_dirent
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet4Pktinfo C.struct_in_pktinfo
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfmaMsghdr2 C.struct_ifma_msghdr2
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// uname
-
-type Utsname C.struct_utsname
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go
deleted file mode 100644
index 3365dd79d..000000000
--- a/vendor/golang.org/x/sys/unix/types_dragonfly.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.struct_fsid
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Uname
-
-type Utsname C.struct_utsname
diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go
deleted file mode 100644
index 747079895..000000000
--- a/vendor/golang.org/x/sys/unix/types_freebsd.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define _WANT_FREEBSD11_STAT 1
-#define _WANT_FREEBSD11_STATFS 1
-#define _WANT_FREEBSD11_DIRENT 1
-#define _WANT_FREEBSD11_KEVENT 1
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-// This structure is a duplicate of if_data on FreeBSD 8-STABLE.
-// See /usr/include/net/if.h.
-struct if_data8 {
- u_char ifi_type;
- u_char ifi_physical;
- u_char ifi_addrlen;
- u_char ifi_hdrlen;
- u_char ifi_link_state;
- u_char ifi_spare_char1;
- u_char ifi_spare_char2;
- u_char ifi_datalen;
- u_long ifi_mtu;
- u_long ifi_metric;
- u_long ifi_baudrate;
- u_long ifi_ipackets;
- u_long ifi_ierrors;
- u_long ifi_opackets;
- u_long ifi_oerrors;
- u_long ifi_collisions;
- u_long ifi_ibytes;
- u_long ifi_obytes;
- u_long ifi_imcasts;
- u_long ifi_omcasts;
- u_long ifi_iqdrops;
- u_long ifi_noproto;
- u_long ifi_hwassist;
-// FIXME: these are now unions, so maybe need to change definitions?
-#undef ifi_epoch
- time_t ifi_epoch;
-#undef ifi_lastchange
- struct timeval ifi_lastchange;
-};
-
-// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE.
-// See /usr/include/net/if.h.
-struct if_msghdr8 {
- u_short ifm_msglen;
- u_char ifm_version;
- u_char ifm_type;
- int ifm_addrs;
- int ifm_flags;
- u_short ifm_index;
- struct if_data8 ifm_data;
-};
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-const (
- _statfsVersion = C.STATFS_VERSION
- _dirblksiz = C.DIRBLKSIZ
-)
-
-type Stat_t C.struct_stat
-
-type stat_freebsd11_t C.struct_freebsd11_stat
-
-type Statfs_t C.struct_statfs
-
-type statfs_freebsd11_t C.struct_freebsd11_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type dirent_freebsd11 C.struct_freebsd11_dirent
-
-type Fsid C.struct_fsid
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Advice to Fadvise
-
-const (
- FADV_NORMAL = C.POSIX_FADV_NORMAL
- FADV_RANDOM = C.POSIX_FADV_RANDOM
- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
- FADV_WILLNEED = C.POSIX_FADV_WILLNEED
- FADV_DONTNEED = C.POSIX_FADV_DONTNEED
- FADV_NOREUSE = C.POSIX_FADV_NOREUSE
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPMreqn C.struct_ip_mreqn
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPMreqn = C.sizeof_struct_ip_mreqn
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent_freebsd11
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- sizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr8
- sizeofIfData = C.sizeof_struct_if_data
- SizeofIfData = C.sizeof_struct_if_data8
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type ifMsghdr C.struct_if_msghdr
-
-type IfMsghdr C.struct_if_msghdr8
-
-type ifData C.struct_if_data
-
-type IfData C.struct_if_data8
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
- SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfZbuf C.struct_bpf_zbuf
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfZbufHeader C.struct_bpf_zbuf_header
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLINIGNEOF = C.POLLINIGNEOF
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Capabilities
-
-type CapRights C.struct_cap_rights
-
-// Uname
-
-type Utsname C.struct_utsname
diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go
deleted file mode 100644
index 2dd4f9542..000000000
--- a/vendor/golang.org/x/sys/unix/types_netbsd.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.fsid_t
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Advice to Fadvise
-
-const (
- FADV_NORMAL = C.POSIX_FADV_NORMAL
- FADV_RANDOM = C.POSIX_FADV_RANDOM
- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
- FADV_WILLNEED = C.POSIX_FADV_WILLNEED
- FADV_DONTNEED = C.POSIX_FADV_DONTNEED
- FADV_NOREUSE = C.POSIX_FADV_NOREUSE
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-type Mclpool C.struct_mclpool
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfTimeval C.struct_bpf_timeval
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-type Ptmget C.struct_ptmget
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Sysctl
-
-type Sysctlnode C.struct_sysctlnode
-
-// Uname
-
-type Utsname C.struct_utsname
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go
deleted file mode 100644
index 8aafbe446..000000000
--- a/vendor/golang.org/x/sys/unix/types_openbsd.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.fsid_t
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-type Mclpool C.struct_mclpool
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfTimeval C.struct_bpf_timeval
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Signal Sets
-
-type Sigset_t C.sigset_t
-
-// Uname
-
-type Utsname C.struct_utsname
-
-// Uvmexp
-
-const SizeofUvmexp = C.sizeof_struct_uvmexp
-
-type Uvmexp C.struct_uvmexp
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go
deleted file mode 100644
index 2b716f934..000000000
--- a/vendor/golang.org/x/sys/unix/types_solaris.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-// These defines ensure that builds done on newer versions of Solaris are
-// backwards-compatible with older versions of Solaris and
-// OpenSolaris-based derivatives.
-#define __USE_SUNOS_SOCKETS__ // msghdr
-#define __USE_LEGACY_PROTOTYPES__ // iovec
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
- PathMax = C.PATH_MAX
- MaxHostNameLen = C.MAXHOSTNAMELEN
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-type Tms C.struct_tms
-
-type Utimbuf C.struct_utimbuf
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-// Filesystems
-
-type _Fsblkcnt_t C.fsblkcnt_t
-
-type Statvfs_t C.struct_statvfs
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Select
-
-type FdSet C.fd_set
-
-// Misc
-
-type Utsname C.struct_utsname
-
-type Ustat_t C.struct_ustat
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_EACCESS = C.AT_EACCESS
-)
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfTimeval C.struct_bpf_timeval
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Termio C.struct_termio
-
-type Winsize C.struct_winsize
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go
deleted file mode 100644
index 0c8eba7e5..000000000
--- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "encoding/xml"
- "fmt"
- "io"
- "log"
- "strings"
-
- "golang.org/x/text/internal/gen"
-)
-
-type registry struct {
- XMLName xml.Name `xml:"registry"`
- Updated string `xml:"updated"`
- Registry []struct {
- ID string `xml:"id,attr"`
- Record []struct {
- Name string `xml:"name"`
- Xref []struct {
- Type string `xml:"type,attr"`
- Data string `xml:"data,attr"`
- } `xml:"xref"`
- Desc struct {
- Data string `xml:",innerxml"`
- // Any []struct {
- // Data string `xml:",chardata"`
- // } `xml:",any"`
- // Data string `xml:",chardata"`
- } `xml:"description,"`
- MIB string `xml:"value"`
- Alias []string `xml:"alias"`
- MIME string `xml:"preferred_alias"`
- } `xml:"record"`
- } `xml:"registry"`
-}
-
-func main() {
- r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
- reg := ®istry{}
- if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF {
- log.Fatalf("Error decoding charset registry: %v", err)
- }
- if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
- log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
- }
-
- w := &bytes.Buffer{}
- fmt.Fprintf(w, "const (\n")
- for _, rec := range reg.Registry[0].Record {
- constName := ""
- for _, a := range rec.Alias {
- if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
- // Some of the constant definitions have comments in them. Strip those.
- constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
- }
- }
- if constName == "" {
- switch rec.MIB {
- case "2085":
- constName = "HZGB2312" // Not listed as alias for some reason.
- default:
- log.Fatalf("No cs alias defined for %s.", rec.MIB)
- }
- }
- if rec.MIME != "" {
- rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
- }
- fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
- if len(rec.Desc.Data) > 0 {
- fmt.Fprint(w, "// ")
- d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
- inElem := true
- attr := ""
- for {
- t, err := d.Token()
- if err != nil {
- if err != io.EOF {
- log.Fatal(err)
- }
- break
- }
- switch x := t.(type) {
- case xml.CharData:
- attr = "" // Don't need attribute info.
- a := bytes.Split([]byte(x), []byte("\n"))
- for i, b := range a {
- if b = bytes.TrimSpace(b); len(b) != 0 {
- if !inElem && i > 0 {
- fmt.Fprint(w, "\n// ")
- }
- inElem = false
- fmt.Fprintf(w, "%s ", string(b))
- }
- }
- case xml.StartElement:
- if x.Name.Local == "xref" {
- inElem = true
- use := false
- for _, a := range x.Attr {
- if a.Name.Local == "type" {
- use = use || a.Value != "person"
- }
- if a.Name.Local == "data" && use {
- attr = a.Value + " "
- }
- }
- }
- case xml.EndElement:
- inElem = false
- fmt.Fprint(w, attr)
- }
- }
- fmt.Fprint(w, "\n")
- }
- for _, x := range rec.Xref {
- switch x.Type {
- case "rfc":
- fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
- case "uri":
- fmt.Fprintf(w, "// Reference: %s\n", x.Data)
- }
- }
- fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
- fmt.Fprintln(w)
- }
- fmt.Fprintln(w, ")")
-
- gen.WriteGoFile("mib.go", "identifier", w.Bytes())
-}
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go
deleted file mode 100644
index 987fc169c..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "flag"
- "log"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/triegen"
- "golang.org/x/text/internal/ucd"
-)
-
-var outputFile = flag.String("out", "tables.go", "output file")
-
-func main() {
- gen.Init()
- gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
- gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")
-
- genTables()
-}
-
-// bidiClass names and codes taken from class "bc" in
-// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
-var bidiClass = map[string]Class{
- "AL": AL, // ArabicLetter
- "AN": AN, // ArabicNumber
- "B": B, // ParagraphSeparator
- "BN": BN, // BoundaryNeutral
- "CS": CS, // CommonSeparator
- "EN": EN, // EuropeanNumber
- "ES": ES, // EuropeanSeparator
- "ET": ET, // EuropeanTerminator
- "L": L, // LeftToRight
- "NSM": NSM, // NonspacingMark
- "ON": ON, // OtherNeutral
- "R": R, // RightToLeft
- "S": S, // SegmentSeparator
- "WS": WS, // WhiteSpace
-
- "FSI": Control,
- "PDF": Control,
- "PDI": Control,
- "LRE": Control,
- "LRI": Control,
- "LRO": Control,
- "RLE": Control,
- "RLI": Control,
- "RLO": Control,
-}
-
-func genTables() {
- if numClass > 0x0F {
- log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
- }
- w := gen.NewCodeWriter()
- defer w.WriteVersionedGoFile(*outputFile, "bidi")
-
- gen.WriteUnicodeVersion(w)
-
- t := triegen.NewTrie("bidi")
-
- // Build data about bracket mapping. These bits need to be or-ed with
- // any other bits.
- orMask := map[rune]uint64{}
-
- xorMap := map[rune]int{}
- xorMasks := []rune{0} // First value is no-op.
-
- ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
- r1 := p.Rune(0)
- r2 := p.Rune(1)
- xor := r1 ^ r2
- if _, ok := xorMap[xor]; !ok {
- xorMap[xor] = len(xorMasks)
- xorMasks = append(xorMasks, xor)
- }
- entry := uint64(xorMap[xor]) << xorMaskShift
- switch p.String(2) {
- case "o":
- entry |= openMask
- case "c", "n":
- default:
- log.Fatalf("Unknown bracket class %q.", p.String(2))
- }
- orMask[r1] = entry
- })
-
- w.WriteComment(`
- xorMasks contains masks to be xor-ed with brackets to get the reverse
- version.`)
- w.WriteVar("xorMasks", xorMasks)
-
- done := map[rune]bool{}
-
- insert := func(r rune, c Class) {
- if !done[r] {
- t.Insert(r, orMask[r]|uint64(c))
- done[r] = true
- }
- }
-
- // Insert the derived BiDi properties.
- ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
- r := p.Rune(0)
- class, ok := bidiClass[p.String(1)]
- if !ok {
- log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
- }
- insert(r, class)
- })
- visitDefaults(insert)
-
- // TODO: use sparse blocks. This would reduce table size considerably
- // from the looks of it.
-
- sz, err := t.Gen(w)
- if err != nil {
- log.Fatal(err)
- }
- w.Size += sz
-}
-
-// dummy values to make methods in gen_common compile. The real versions
-// will be generated by this file to tables.go.
-var (
- xorMasks []rune
-)
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
deleted file mode 100644
index 02c3b505d..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "unicode"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/ucd"
- "golang.org/x/text/unicode/rangetable"
-)
-
-// These tables are hand-extracted from:
-// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
-func visitDefaults(fn func(r rune, c Class)) {
- // first write default values for ranges listed above.
- visitRunes(fn, AL, []rune{
- 0x0600, 0x07BF, // Arabic
- 0x08A0, 0x08FF, // Arabic Extended-A
- 0xFB50, 0xFDCF, // Arabic Presentation Forms
- 0xFDF0, 0xFDFF,
- 0xFE70, 0xFEFF,
- 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
- })
- visitRunes(fn, R, []rune{
- 0x0590, 0x05FF, // Hebrew
- 0x07C0, 0x089F, // Nko et al.
- 0xFB1D, 0xFB4F,
- 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al.
- 0x0001E800, 0x0001EDFF,
- 0x0001EF00, 0x0001EFFF,
- })
- visitRunes(fn, ET, []rune{ // European Terminator
- 0x20A0, 0x20Cf, // Currency symbols
- })
- rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
- fn(r, BN) // Boundary Neutral
- })
- ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
- if p.String(1) == "Default_Ignorable_Code_Point" {
- fn(p.Rune(0), BN) // Boundary Neutral
- }
- })
-}
-
-func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
- for i := 0; i < len(runes); i += 2 {
- lo, hi := runes[i], runes[i+1]
- for j := lo; j <= hi; j++ {
- fn(j, c)
- }
- }
-}
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
deleted file mode 100644
index 9cb994289..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// Class is the Unicode BiDi class. Each rune has a single class.
-type Class uint
-
-const (
- L Class = iota // LeftToRight
- R // RightToLeft
- EN // EuropeanNumber
- ES // EuropeanSeparator
- ET // EuropeanTerminator
- AN // ArabicNumber
- CS // CommonSeparator
- B // ParagraphSeparator
- S // SegmentSeparator
- WS // WhiteSpace
- ON // OtherNeutral
- BN // BoundaryNeutral
- NSM // NonspacingMark
- AL // ArabicLetter
- Control // Control LRO - PDI
-
- numClass
-
- LRO // LeftToRightOverride
- RLO // RightToLeftOverride
- LRE // LeftToRightEmbedding
- RLE // RightToLeftEmbedding
- PDF // PopDirectionalFormat
- LRI // LeftToRightIsolate
- RLI // RightToLeftIsolate
- FSI // FirstStrongIsolate
- PDI // PopDirectionalIsolate
-
- unknownClass = ^Class(0)
-)
-
-var controlToClass = map[rune]Class{
- 0x202D: LRO, // LeftToRightOverride,
- 0x202E: RLO, // RightToLeftOverride,
- 0x202A: LRE, // LeftToRightEmbedding,
- 0x202B: RLE, // RightToLeftEmbedding,
- 0x202C: PDF, // PopDirectionalFormat,
- 0x2066: LRI, // LeftToRightIsolate,
- 0x2067: RLI, // RightToLeftIsolate,
- 0x2068: FSI, // FirstStrongIsolate,
- 0x2069: PDI, // PopDirectionalIsolate,
-}
-
-// A trie entry has the following bits:
-// 7..5 XOR mask for brackets
-// 4 1: Bracket open, 0: Bracket close
-// 3..0 Class type
-
-const (
- openMask = 0x10
- xorMaskShift = 5
-)
diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go
deleted file mode 100644
index 30a3aa933..000000000
--- a/vendor/golang.org/x/text/unicode/norm/maketables.go
+++ /dev/null
@@ -1,986 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Normalization table generator.
-// Data read from the web.
-// See forminfo.go for a description of the trie values associated with each rune.
-
-package main
-
-import (
- "bytes"
- "encoding/binary"
- "flag"
- "fmt"
- "io"
- "log"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/triegen"
- "golang.org/x/text/internal/ucd"
-)
-
-func main() {
- gen.Init()
- loadUnicodeData()
- compactCCC()
- loadCompositionExclusions()
- completeCharFields(FCanonical)
- completeCharFields(FCompatibility)
- computeNonStarterCounts()
- verifyComputed()
- printChars()
- testDerived()
- printTestdata()
- makeTables()
-}
-
-var (
- tablelist = flag.String("tables",
- "all",
- "comma-separated list of which tables to generate; "+
- "can be 'decomp', 'recomp', 'info' and 'all'")
- test = flag.Bool("test",
- false,
- "test existing tables against DerivedNormalizationProps and generate test data for regression testing")
- verbose = flag.Bool("verbose",
- false,
- "write data to stdout as it is parsed")
-)
-
-const MaxChar = 0x10FFFF // anything above this shouldn't exist
-
-// Quick Check properties of runes allow us to quickly
-// determine whether a rune may occur in a normal form.
-// For a given normal form, a rune may be guaranteed to occur
-// verbatim (QC=Yes), may or may not combine with another
-// rune (QC=Maybe), or may not occur (QC=No).
-type QCResult int
-
-const (
- QCUnknown QCResult = iota
- QCYes
- QCNo
- QCMaybe
-)
-
-func (r QCResult) String() string {
- switch r {
- case QCYes:
- return "Yes"
- case QCNo:
- return "No"
- case QCMaybe:
- return "Maybe"
- }
- return "***UNKNOWN***"
-}
-
-const (
- FCanonical = iota // NFC or NFD
- FCompatibility // NFKC or NFKD
- FNumberOfFormTypes
-)
-
-const (
- MComposed = iota // NFC or NFKC
- MDecomposed // NFD or NFKD
- MNumberOfModes
-)
-
-// This contains only the properties we're interested in.
-type Char struct {
- name string
- codePoint rune // if zero, this index is not a valid code point.
- ccc uint8 // canonical combining class
- origCCC uint8
- excludeInComp bool // from CompositionExclusions.txt
- compatDecomp bool // it has a compatibility expansion
-
- nTrailingNonStarters uint8
- nLeadingNonStarters uint8 // must be equal to trailing if non-zero
-
- forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
-
- state State
-}
-
-var chars = make([]Char, MaxChar+1)
-var cccMap = make(map[uint8]uint8)
-
-func (c Char) String() string {
- buf := new(bytes.Buffer)
-
- fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
- fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
- fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
- fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
- fmt.Fprintf(buf, " state: %v\n", c.state)
- fmt.Fprintf(buf, " NFC:\n")
- fmt.Fprint(buf, c.forms[FCanonical])
- fmt.Fprintf(buf, " NFKC:\n")
- fmt.Fprint(buf, c.forms[FCompatibility])
-
- return buf.String()
-}
-
-// In UnicodeData.txt, some ranges are marked like this:
-// 3400;;Lo;0;L;;;;;N;;;;;
-// 4DB5;;Lo;0;L;;;;;N;;;;;
-// parseCharacter keeps a state variable indicating the weirdness.
-type State int
-
-const (
- SNormal State = iota // known to be zero for the type
- SFirst
- SLast
- SMissing
-)
-
-var lastChar = rune('\u0000')
-
-func (c Char) isValid() bool {
- return c.codePoint != 0 && c.state != SMissing
-}
-
-type FormInfo struct {
- quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
- verified [MNumberOfModes]bool // index: MComposed or MDecomposed
-
- combinesForward bool // May combine with rune on the right
- combinesBackward bool // May combine with rune on the left
- isOneWay bool // Never appears in result
- inDecomp bool // Some decompositions result in this char.
- decomp Decomposition
- expandedDecomp Decomposition
-}
-
-func (f FormInfo) String() string {
- buf := bytes.NewBuffer(make([]byte, 0))
-
- fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
- fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
- fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
- fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
- fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
- fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
- fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
- fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
-
- return buf.String()
-}
-
-type Decomposition []rune
-
-func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
- decomp := strings.Split(s, " ")
- if len(decomp) > 0 && skipfirst {
- decomp = decomp[1:]
- }
- for _, d := range decomp {
- point, err := strconv.ParseUint(d, 16, 64)
- if err != nil {
- return a, err
- }
- a = append(a, rune(point))
- }
- return a, nil
-}
-
-func loadUnicodeData() {
- f := gen.OpenUCDFile("UnicodeData.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- r := p.Rune(ucd.CodePoint)
- char := &chars[r]
-
- char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
- decmap := p.String(ucd.DecompMapping)
-
- exp, err := parseDecomposition(decmap, false)
- isCompat := false
- if err != nil {
- if len(decmap) > 0 {
- exp, err = parseDecomposition(decmap, true)
- if err != nil {
- log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
- }
- isCompat = true
- }
- }
-
- char.name = p.String(ucd.Name)
- char.codePoint = r
- char.forms[FCompatibility].decomp = exp
- if !isCompat {
- char.forms[FCanonical].decomp = exp
- } else {
- char.compatDecomp = true
- }
- if len(decmap) > 0 {
- char.forms[FCompatibility].decomp = exp
- }
- }
- if err := p.Err(); err != nil {
- log.Fatal(err)
- }
-}
-
-// compactCCC converts the sparse set of CCC values to a continguous one,
-// reducing the number of bits needed from 8 to 6.
-func compactCCC() {
- m := make(map[uint8]uint8)
- for i := range chars {
- c := &chars[i]
- m[c.ccc] = 0
- }
- cccs := []int{}
- for v, _ := range m {
- cccs = append(cccs, int(v))
- }
- sort.Ints(cccs)
- for i, c := range cccs {
- cccMap[uint8(i)] = uint8(c)
- m[uint8(c)] = uint8(i)
- }
- for i := range chars {
- c := &chars[i]
- c.origCCC = c.ccc
- c.ccc = m[c.ccc]
- }
- if len(m) >= 1<<6 {
- log.Fatalf("too many difference CCC values: %d >= 64", len(m))
- }
-}
-
-// CompositionExclusions.txt has form:
-// 0958 # ...
-// See https://unicode.org/reports/tr44/ for full explanation
-func loadCompositionExclusions() {
- f := gen.OpenUCDFile("CompositionExclusions.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- c := &chars[p.Rune(0)]
- if c.excludeInComp {
- log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
- }
- c.excludeInComp = true
- }
- if e := p.Err(); e != nil {
- log.Fatal(e)
- }
-}
-
-// hasCompatDecomp returns true if any of the recursive
-// decompositions contains a compatibility expansion.
-// In this case, the character may not occur in NFK*.
-func hasCompatDecomp(r rune) bool {
- c := &chars[r]
- if c.compatDecomp {
- return true
- }
- for _, d := range c.forms[FCompatibility].decomp {
- if hasCompatDecomp(d) {
- return true
- }
- }
- return false
-}
-
-// Hangul related constants.
-const (
- HangulBase = 0xAC00
- HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
-
- JamoLBase = 0x1100
- JamoLEnd = 0x1113
- JamoVBase = 0x1161
- JamoVEnd = 0x1176
- JamoTBase = 0x11A8
- JamoTEnd = 0x11C3
-
- JamoLVTCount = 19 * 21 * 28
- JamoTCount = 28
-)
-
-func isHangul(r rune) bool {
- return HangulBase <= r && r < HangulEnd
-}
-
-func isHangulWithoutJamoT(r rune) bool {
- if !isHangul(r) {
- return false
- }
- r -= HangulBase
- return r < JamoLVTCount && r%JamoTCount == 0
-}
-
-func ccc(r rune) uint8 {
- return chars[r].ccc
-}
-
-// Insert a rune in a buffer, ordered by Canonical Combining Class.
-func insertOrdered(b Decomposition, r rune) Decomposition {
- n := len(b)
- b = append(b, 0)
- cc := ccc(r)
- if cc > 0 {
- // Use bubble sort.
- for ; n > 0; n-- {
- if ccc(b[n-1]) <= cc {
- break
- }
- b[n] = b[n-1]
- }
- }
- b[n] = r
- return b
-}
-
-// Recursively decompose.
-func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
- dcomp := chars[r].forms[form].decomp
- if len(dcomp) == 0 {
- return insertOrdered(d, r)
- }
- for _, c := range dcomp {
- d = decomposeRecursive(form, c, d)
- }
- return d
-}
-
-func completeCharFields(form int) {
- // Phase 0: pre-expand decomposition.
- for i := range chars {
- f := &chars[i].forms[form]
- if len(f.decomp) == 0 {
- continue
- }
- exp := make(Decomposition, 0)
- for _, c := range f.decomp {
- exp = decomposeRecursive(form, c, exp)
- }
- f.expandedDecomp = exp
- }
-
- // Phase 1: composition exclusion, mark decomposition.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- // Marks script-specific exclusions and version restricted.
- f.isOneWay = c.excludeInComp
-
- // Singletons
- f.isOneWay = f.isOneWay || len(f.decomp) == 1
-
- // Non-starter decompositions
- if len(f.decomp) > 1 {
- chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
- f.isOneWay = f.isOneWay || chk
- }
-
- // Runes that decompose into more than two runes.
- f.isOneWay = f.isOneWay || len(f.decomp) > 2
-
- if form == FCompatibility {
- f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
- }
-
- for _, r := range f.decomp {
- chars[r].forms[form].inDecomp = true
- }
- }
-
- // Phase 2: forward and backward combining.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- if !f.isOneWay && len(f.decomp) == 2 {
- f0 := &chars[f.decomp[0]].forms[form]
- f1 := &chars[f.decomp[1]].forms[form]
- if !f0.isOneWay {
- f0.combinesForward = true
- }
- if !f1.isOneWay {
- f1.combinesBackward = true
- }
- }
- if isHangulWithoutJamoT(rune(i)) {
- f.combinesForward = true
- }
- }
-
- // Phase 3: quick check values.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- switch {
- case len(f.decomp) > 0:
- f.quickCheck[MDecomposed] = QCNo
- case isHangul(rune(i)):
- f.quickCheck[MDecomposed] = QCNo
- default:
- f.quickCheck[MDecomposed] = QCYes
- }
- switch {
- case f.isOneWay:
- f.quickCheck[MComposed] = QCNo
- case (i & 0xffff00) == JamoLBase:
- f.quickCheck[MComposed] = QCYes
- if JamoLBase <= i && i < JamoLEnd {
- f.combinesForward = true
- }
- if JamoVBase <= i && i < JamoVEnd {
- f.quickCheck[MComposed] = QCMaybe
- f.combinesBackward = true
- f.combinesForward = true
- }
- if JamoTBase <= i && i < JamoTEnd {
- f.quickCheck[MComposed] = QCMaybe
- f.combinesBackward = true
- }
- case !f.combinesBackward:
- f.quickCheck[MComposed] = QCYes
- default:
- f.quickCheck[MComposed] = QCMaybe
- }
- }
-}
-
-func computeNonStarterCounts() {
- // Phase 4: leading and trailing non-starter count
- for i := range chars {
- c := &chars[i]
-
- runes := []rune{rune(i)}
- // We always use FCompatibility so that the CGJ insertion points do not
- // change for repeated normalizations with different forms.
- if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
- runes = exp
- }
- // We consider runes that combine backwards to be non-starters for the
- // purpose of Stream-Safe Text Processing.
- for _, r := range runes {
- if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
- break
- }
- c.nLeadingNonStarters++
- }
- for i := len(runes) - 1; i >= 0; i-- {
- if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
- break
- }
- c.nTrailingNonStarters++
- }
- if c.nTrailingNonStarters > 3 {
- log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
- }
-
- if isHangul(rune(i)) {
- c.nTrailingNonStarters = 2
- if isHangulWithoutJamoT(rune(i)) {
- c.nTrailingNonStarters = 1
- }
- }
-
- if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
- log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
- }
- if t := c.nTrailingNonStarters; t > 3 {
- log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
- }
- }
-}
-
-func printBytes(w io.Writer, b []byte, name string) {
- fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
- fmt.Fprintf(w, "var %s = [...]byte {", name)
- for i, c := range b {
- switch {
- case i%64 == 0:
- fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
- case i%8 == 0:
- fmt.Fprintf(w, "\n")
- }
- fmt.Fprintf(w, "0x%.2X, ", c)
- }
- fmt.Fprint(w, "\n}\n\n")
-}
-
-// See forminfo.go for format.
-func makeEntry(f *FormInfo, c *Char) uint16 {
- e := uint16(0)
- if r := c.codePoint; HangulBase <= r && r < HangulEnd {
- e |= 0x40
- }
- if f.combinesForward {
- e |= 0x20
- }
- if f.quickCheck[MDecomposed] == QCNo {
- e |= 0x4
- }
- switch f.quickCheck[MComposed] {
- case QCYes:
- case QCNo:
- e |= 0x10
- case QCMaybe:
- e |= 0x18
- default:
- log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
- }
- e |= uint16(c.nTrailingNonStarters)
- return e
-}
-
-// decompSet keeps track of unique decompositions, grouped by whether
-// the decomposition is followed by a trailing and/or leading CCC.
-type decompSet [7]map[string]bool
-
-const (
- normalDecomp = iota
- firstMulti
- firstCCC
- endMulti
- firstLeadingCCC
- firstCCCZeroExcept
- firstStarterWithNLead
- lastDecomp
-)
-
-var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
-
-func makeDecompSet() decompSet {
- m := decompSet{}
- for i := range m {
- m[i] = make(map[string]bool)
- }
- return m
-}
-func (m *decompSet) insert(key int, s string) {
- m[key][s] = true
-}
-
-func printCharInfoTables(w io.Writer) int {
- mkstr := func(r rune, f *FormInfo) (int, string) {
- d := f.expandedDecomp
- s := string([]rune(d))
- if max := 1 << 6; len(s) >= max {
- const msg = "%U: too many bytes in decomposition: %d >= %d"
- log.Fatalf(msg, r, len(s), max)
- }
- head := uint8(len(s))
- if f.quickCheck[MComposed] != QCYes {
- head |= 0x40
- }
- if f.combinesForward {
- head |= 0x80
- }
- s = string([]byte{head}) + s
-
- lccc := ccc(d[0])
- tccc := ccc(d[len(d)-1])
- cc := ccc(r)
- if cc != 0 && lccc == 0 && tccc == 0 {
- log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
- }
- if tccc < lccc && lccc != 0 {
- const msg = "%U: lccc (%d) must be <= tcc (%d)"
- log.Fatalf(msg, r, lccc, tccc)
- }
- index := normalDecomp
- nTrail := chars[r].nTrailingNonStarters
- nLead := chars[r].nLeadingNonStarters
- if tccc > 0 || lccc > 0 || nTrail > 0 {
- tccc <<= 2
- tccc |= nTrail
- s += string([]byte{tccc})
- index = endMulti
- for _, r := range d[1:] {
- if ccc(r) == 0 {
- index = firstCCC
- }
- }
- if lccc > 0 || nLead > 0 {
- s += string([]byte{lccc})
- if index == firstCCC {
- log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
- }
- index = firstLeadingCCC
- }
- if cc != lccc {
- if cc != 0 {
- log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
- }
- index = firstCCCZeroExcept
- }
- } else if len(d) > 1 {
- index = firstMulti
- }
- return index, s
- }
-
- decompSet := makeDecompSet()
- const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
- decompSet.insert(firstStarterWithNLead, nLeadStr)
-
- // Store the uniqued decompositions in a byte buffer,
- // preceded by their byte length.
- for _, c := range chars {
- for _, f := range c.forms {
- if len(f.expandedDecomp) == 0 {
- continue
- }
- if f.combinesBackward {
- log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
- }
- index, s := mkstr(c.codePoint, &f)
- decompSet.insert(index, s)
- }
- }
-
- decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
- size := 0
- positionMap := make(map[string]uint16)
- decompositions.WriteString("\000")
- fmt.Fprintln(w, "const (")
- for i, m := range decompSet {
- sa := []string{}
- for s := range m {
- sa = append(sa, s)
- }
- sort.Strings(sa)
- for _, s := range sa {
- p := decompositions.Len()
- decompositions.WriteString(s)
- positionMap[s] = uint16(p)
- }
- if cname[i] != "" {
- fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
- }
- }
- fmt.Fprintln(w, "maxDecomp = 0x8000")
- fmt.Fprintln(w, ")")
- b := decompositions.Bytes()
- printBytes(w, b, "decomps")
- size += len(b)
-
- varnames := []string{"nfc", "nfkc"}
- for i := 0; i < FNumberOfFormTypes; i++ {
- trie := triegen.NewTrie(varnames[i])
-
- for r, c := range chars {
- f := c.forms[i]
- d := f.expandedDecomp
- if len(d) != 0 {
- _, key := mkstr(c.codePoint, &f)
- trie.Insert(rune(r), uint64(positionMap[key]))
- if c.ccc != ccc(d[0]) {
- // We assume the lead ccc of a decomposition !=0 in this case.
- if ccc(d[0]) == 0 {
- log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
- }
- }
- } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
- // Handle cases where it can't be detected that the nLead should be equal
- // to nTrail.
- trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
- } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
- trie.Insert(c.codePoint, uint64(0x8000|v))
- }
- }
- sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
- if err != nil {
- log.Fatal(err)
- }
- size += sz
- }
- return size
-}
-
-func contains(sa []string, s string) bool {
- for _, a := range sa {
- if a == s {
- return true
- }
- }
- return false
-}
-
-func makeTables() {
- w := &bytes.Buffer{}
-
- size := 0
- if *tablelist == "" {
- return
- }
- list := strings.Split(*tablelist, ",")
- if *tablelist == "all" {
- list = []string{"recomp", "info"}
- }
-
- // Compute maximum decomposition size.
- max := 0
- for _, c := range chars {
- if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
- max = n
- }
- }
- fmt.Fprintln(w, `import "sync"`)
- fmt.Fprintln(w)
-
- fmt.Fprintln(w, "const (")
- fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
- fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
- fmt.Fprintln(w)
- fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
- fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
- fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
- fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
- fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
- fmt.Fprintln(w, ")\n")
-
- // Print the CCC remap table.
- size += len(cccMap)
- fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
- for i := 0; i < len(cccMap); i++ {
- if i%8 == 0 {
- fmt.Fprintln(w)
- }
- fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
- }
- fmt.Fprintln(w, "\n}\n")
-
- if contains(list, "info") {
- size += printCharInfoTables(w)
- }
-
- if contains(list, "recomp") {
- // Note that we use 32 bit keys, instead of 64 bit.
- // This clips the bits of three entries, but we know
- // this won't cause a collision. The compiler will catch
- // any changes made to UnicodeData.txt that introduces
- // a collision.
- // Note that the recomposition map for NFC and NFKC
- // are identical.
-
- // Recomposition map
- nrentries := 0
- for _, c := range chars {
- f := c.forms[FCanonical]
- if !f.isOneWay && len(f.decomp) > 0 {
- nrentries++
- }
- }
- sz := nrentries * 8
- size += sz
- fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
- fmt.Fprintln(w, "var recompMap map[uint32]rune")
- fmt.Fprintln(w, "var recompMapOnce sync.Once\n")
- fmt.Fprintln(w, `const recompMapPacked = "" +`)
- var buf [8]byte
- for i, c := range chars {
- f := c.forms[FCanonical]
- d := f.decomp
- if !f.isOneWay && len(d) > 0 {
- key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
- binary.BigEndian.PutUint32(buf[:4], key)
- binary.BigEndian.PutUint32(buf[4:], uint32(i))
- fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i))
- }
- }
- // hack so we don't have to special case the trailing plus sign
- fmt.Fprintf(w, ` ""`)
- fmt.Fprintln(w)
- }
-
- fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
- gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
-}
-
-func printChars() {
- if *verbose {
- for _, c := range chars {
- if !c.isValid() || c.state == SMissing {
- continue
- }
- fmt.Println(c)
- }
- }
-}
-
-// verifyComputed does various consistency tests.
-func verifyComputed() {
- for i, c := range chars {
- for _, f := range c.forms {
- isNo := (f.quickCheck[MDecomposed] == QCNo)
- if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
- log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
- }
-
- isMaybe := f.quickCheck[MComposed] == QCMaybe
- if f.combinesBackward != isMaybe {
- log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
- }
- if len(f.decomp) > 0 && f.combinesForward && isMaybe {
- log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
- }
-
- if len(f.expandedDecomp) != 0 {
- continue
- }
- if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
- // We accept these runes to be treated differently (it only affects
- // segment breaking in iteration, most likely on improper use), but
- // reconsider if more characters are added.
- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;;
- // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;;
- // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
- // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
- // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
- // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;;
- if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
- log.Fatalf("%U: nLead was %v; want %v", i, a, b)
- }
- }
- }
- nfc := c.forms[FCanonical]
- nfkc := c.forms[FCompatibility]
- if nfc.combinesBackward != nfkc.combinesBackward {
- log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
- }
- }
-}
-
-// Use values in DerivedNormalizationProps.txt to compare against the
-// values we computed.
-// DerivedNormalizationProps.txt has form:
-// 00C0..00C5 ; NFD_QC; N # ...
-// 0374 ; NFD_QC; N # ...
-// See https://unicode.org/reports/tr44/ for full explanation
-func testDerived() {
- f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- r := p.Rune(0)
- c := &chars[r]
-
- var ftype, mode int
- qt := p.String(1)
- switch qt {
- case "NFC_QC":
- ftype, mode = FCanonical, MComposed
- case "NFD_QC":
- ftype, mode = FCanonical, MDecomposed
- case "NFKC_QC":
- ftype, mode = FCompatibility, MComposed
- case "NFKD_QC":
- ftype, mode = FCompatibility, MDecomposed
- default:
- continue
- }
- var qr QCResult
- switch p.String(2) {
- case "Y":
- qr = QCYes
- case "N":
- qr = QCNo
- case "M":
- qr = QCMaybe
- default:
- log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
- }
- if got := c.forms[ftype].quickCheck[mode]; got != qr {
- log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
- }
- c.forms[ftype].verified[mode] = true
- }
- if err := p.Err(); err != nil {
- log.Fatal(err)
- }
- // Any unspecified value must be QCYes. Verify this.
- for i, c := range chars {
- for j, fd := range c.forms {
- for k, qr := range fd.quickCheck {
- if !fd.verified[k] && qr != QCYes {
- m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
- log.Printf(m, i, j, k, qr, c.name)
- }
- }
- }
- }
-}
-
-var testHeader = `const (
- Yes = iota
- No
- Maybe
-)
-
-type formData struct {
- qc uint8
- combinesForward bool
- decomposition string
-}
-
-type runeData struct {
- r rune
- ccc uint8
- nLead uint8
- nTrail uint8
- f [2]formData // 0: canonical; 1: compatibility
-}
-
-func f(qc uint8, cf bool, dec string) [2]formData {
- return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
-}
-
-func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
- return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
-}
-
-var testData = []runeData{
-`
-
-func printTestdata() {
- type lastInfo struct {
- ccc uint8
- nLead uint8
- nTrail uint8
- f string
- }
-
- last := lastInfo{}
- w := &bytes.Buffer{}
- fmt.Fprintf(w, testHeader)
- for r, c := range chars {
- f := c.forms[FCanonical]
- qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
- f = c.forms[FCompatibility]
- qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
- s := ""
- if d == dk && qc == qck && cf == cfk {
- s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
- } else {
- s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
- }
- current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
- if last != current {
- fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
- last = current
- }
- }
- fmt.Fprintln(w, "}")
- gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
-}
diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go
deleted file mode 100644
index 45d711900..000000000
--- a/vendor/golang.org/x/text/unicode/norm/triegen.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Trie table generator.
-// Used by make*tables tools to generate a go file with trie data structures
-// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
-// sequence are used to lookup offsets in the index table to be used for the
-// next byte. The last byte is used to index into a table with 16-bit values.
-
-package main
-
-import (
- "fmt"
- "io"
-)
-
-const maxSparseEntries = 16
-
-type normCompacter struct {
- sparseBlocks [][]uint64
- sparseOffset []uint16
- sparseCount int
- name string
-}
-
-func mostFrequentStride(a []uint64) int {
- counts := make(map[int]int)
- var v int
- for _, x := range a {
- if stride := int(x) - v; v != 0 && stride >= 0 {
- counts[stride]++
- }
- v = int(x)
- }
- var maxs, maxc int
- for stride, cnt := range counts {
- if cnt > maxc || (cnt == maxc && stride < maxs) {
- maxs, maxc = stride, cnt
- }
- }
- return maxs
-}
-
-func countSparseEntries(a []uint64) int {
- stride := mostFrequentStride(a)
- var v, count int
- for _, tv := range a {
- if int(tv)-v != stride {
- if tv != 0 {
- count++
- }
- }
- v = int(tv)
- }
- return count
-}
-
-func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
- if n := countSparseEntries(v); n <= maxSparseEntries {
- return (n+1)*4 + 2, true
- }
- return 0, false
-}
-
-func (c *normCompacter) Store(v []uint64) uint32 {
- h := uint32(len(c.sparseOffset))
- c.sparseBlocks = append(c.sparseBlocks, v)
- c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
- c.sparseCount += countSparseEntries(v) + 1
- return h
-}
-
-func (c *normCompacter) Handler() string {
- return c.name + "Sparse.lookup"
-}
-
-func (c *normCompacter) Print(w io.Writer) (retErr error) {
- p := func(f string, x ...interface{}) {
- if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
- retErr = err
- }
- }
-
- ls := len(c.sparseBlocks)
- p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
- p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
-
- ns := c.sparseCount
- p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
- p("var %sSparseValues = [%d]valueRange {", c.name, ns)
- for i, b := range c.sparseBlocks {
- p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
- var v int
- stride := mostFrequentStride(b)
- n := countSparseEntries(b)
- p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
- for i, nv := range b {
- if int(nv)-v != stride {
- if v != 0 {
- p(",hi:%#02x},", 0x80+i-1)
- }
- if nv != 0 {
- p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
- }
- }
- v = int(nv)
- }
- if v != 0 {
- p(",hi:%#02x},", 0x80+len(b)-1)
- }
- }
- p("\n}\n\n")
- return
-}
diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go
deleted file mode 100644
index c55327119..000000000
--- a/vendor/google.golang.org/api/support/bundler/bundler.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2016 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package bundler supports bundling (batching) of items. Bundling amortizes an
-// action with fixed costs over multiple items. For example, if an API provides
-// an RPC that accepts a list of items as input, but clients would prefer
-// adding items one at a time, then a Bundler can accept individual items from
-// the client and bundle many of them into a single RPC.
-//
-// This package is experimental and subject to change without notice.
-package bundler
-
-import (
- "context"
- "errors"
- "math"
- "reflect"
- "sync"
- "time"
-
- "golang.org/x/sync/semaphore"
-)
-
-const (
- DefaultDelayThreshold = time.Second
- DefaultBundleCountThreshold = 10
- DefaultBundleByteThreshold = 1e6 // 1M
- DefaultBufferedByteLimit = 1e9 // 1G
-)
-
-var (
- // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit.
- ErrOverflow = errors.New("bundler reached buffered byte limit")
-
- // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.
- ErrOversizedItem = errors.New("item size exceeds bundle byte limit")
-)
-
-// A Bundler collects items added to it into a bundle until the bundle
-// exceeds a given size, then calls a user-provided function to handle the bundle.
-type Bundler struct {
- // Starting from the time that the first message is added to a bundle, once
- // this delay has passed, handle the bundle. The default is DefaultDelayThreshold.
- DelayThreshold time.Duration
-
- // Once a bundle has this many items, handle the bundle. Since only one
- // item at a time is added to a bundle, no bundle will exceed this
- // threshold, so it also serves as a limit. The default is
- // DefaultBundleCountThreshold.
- BundleCountThreshold int
-
- // Once the number of bytes in current bundle reaches this threshold, handle
- // the bundle. The default is DefaultBundleByteThreshold. This triggers handling,
- // but does not cap the total size of a bundle.
- BundleByteThreshold int
-
- // The maximum size of a bundle, in bytes. Zero means unlimited.
- BundleByteLimit int
-
- // The maximum number of bytes that the Bundler will keep in memory before
- // returning ErrOverflow. The default is DefaultBufferedByteLimit.
- BufferedByteLimit int
-
- // The maximum number of handler invocations that can be running at once.
- // The default is 1.
- HandlerLimit int
-
- handler func(interface{}) // called to handle a bundle
- itemSliceZero reflect.Value // nil (zero value) for slice of items
- flushTimer *time.Timer // implements DelayThreshold
-
- mu sync.Mutex
- sem *semaphore.Weighted // enforces BufferedByteLimit
- semOnce sync.Once
- curBundle bundle // incoming items added to this bundle
-
- // Each bundle is assigned a unique ticket that determines the order in which the
- // handler is called. The ticket is assigned with mu locked, but waiting for tickets
- // to be handled is done via mu2 and cond, below.
- nextTicket uint64 // next ticket to be assigned
-
- mu2 sync.Mutex
- cond *sync.Cond
- nextHandled uint64 // next ticket to be handled
-
- // In this implementation, active uses space proportional to HandlerLimit, and
- // waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire
- // or release occurs, so large values of HandlerLimit max may cause performance
- // issues.
- active map[uint64]bool // tickets of bundles actively being handled
-}
-
-type bundle struct {
- items reflect.Value // slice of item type
- size int // size in bytes of all items
-}
-
-// NewBundler creates a new Bundler.
-//
-// itemExample is a value of the type that will be bundled. For example, if you
-// want to create bundles of *Entry, you could pass &Entry{} for itemExample.
-//
-// handler is a function that will be called on each bundle. If itemExample is
-// of type T, the argument to handler is of type []T. handler is always called
-// sequentially for each bundle, and never in parallel.
-//
-// Configure the Bundler by setting its thresholds and limits before calling
-// any of its methods.
-func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {
- b := &Bundler{
- DelayThreshold: DefaultDelayThreshold,
- BundleCountThreshold: DefaultBundleCountThreshold,
- BundleByteThreshold: DefaultBundleByteThreshold,
- BufferedByteLimit: DefaultBufferedByteLimit,
- HandlerLimit: 1,
-
- handler: handler,
- itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),
- active: map[uint64]bool{},
- }
- b.curBundle.items = b.itemSliceZero
- b.cond = sync.NewCond(&b.mu2)
- return b
-}
-
-func (b *Bundler) initSemaphores() {
- // Create the semaphores lazily, because the user may set limits
- // after NewBundler.
- b.semOnce.Do(func() {
- b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit))
- })
-}
-
-// Add adds item to the current bundle. It marks the bundle for handling and
-// starts a new one if any of the thresholds or limits are exceeded.
-//
-// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
-// the item can never be handled. Add returns ErrOversizedItem in this case.
-//
-// If adding the item would exceed the maximum memory allowed
-// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for
-// memory, Add returns ErrOverflow.
-//
-// Add never blocks.
-func (b *Bundler) Add(item interface{}, size int) error {
- // If this item exceeds the maximum size of a bundle,
- // we can never send it.
- if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
- return ErrOversizedItem
- }
- // If adding this item would exceed our allotted memory
- // footprint, we can't accept it.
- // (TryAcquire also returns false if anything is waiting on the semaphore,
- // so calls to Add and AddWait shouldn't be mixed.)
- b.initSemaphores()
- if !b.sem.TryAcquire(int64(size)) {
- return ErrOverflow
- }
- b.add(item, size)
- return nil
-}
-
-// add adds item to the current bundle. It marks the bundle for handling and
-// starts a new one if any of the thresholds or limits are exceeded.
-func (b *Bundler) add(item interface{}, size int) {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- // If adding this item to the current bundle would cause it to exceed the
- // maximum bundle size, close the current bundle and start a new one.
- if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit {
- b.startFlushLocked()
- }
- // Add the item.
- b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item))
- b.curBundle.size += size
-
- // Start a timer to flush the item if one isn't already running.
- // startFlushLocked clears the timer and closes the bundle at the same time,
- // so we only allocate a new timer for the first item in each bundle.
- // (We could try to call Reset on the timer instead, but that would add a lot
- // of complexity to the code just to save one small allocation.)
- if b.flushTimer == nil {
- b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush)
- }
-
- // If the current bundle equals the count threshold, close it.
- if b.curBundle.items.Len() == b.BundleCountThreshold {
- b.startFlushLocked()
- }
- // If the current bundle equals or exceeds the byte threshold, close it.
- if b.curBundle.size >= b.BundleByteThreshold {
- b.startFlushLocked()
- }
-}
-
-// AddWait adds item to the current bundle. It marks the bundle for handling and
-// starts a new one if any of the thresholds or limits are exceeded.
-//
-// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
-// the item can never be handled. AddWait returns ErrOversizedItem in this case.
-//
-// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),
-// AddWait blocks until space is available or ctx is done.
-//
-// Calls to Add and AddWait should not be mixed on the same Bundler.
-func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error {
- // If this item exceeds the maximum size of a bundle,
- // we can never send it.
- if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
- return ErrOversizedItem
- }
- // If adding this item would exceed our allotted memory footprint, block
- // until space is available. The semaphore is FIFO, so there will be no
- // starvation.
- b.initSemaphores()
- if err := b.sem.Acquire(ctx, int64(size)); err != nil {
- return err
- }
- // Here, we've reserved space for item. Other goroutines can call AddWait
- // and even acquire space, but no one can take away our reservation
- // (assuming sem.Release is used correctly). So there is no race condition
- // resulting from locking the mutex after sem.Acquire returns.
- b.add(item, size)
- return nil
-}
-
-// Flush invokes the handler for all remaining items in the Bundler and waits
-// for it to return.
-func (b *Bundler) Flush() {
- b.mu.Lock()
- b.startFlushLocked()
- // Here, all bundles with tickets < b.nextTicket are
- // either finished or active. Those are the ones
- // we want to wait for.
- t := b.nextTicket
- b.mu.Unlock()
- b.initSemaphores()
- b.waitUntilAllHandled(t)
-}
-
-func (b *Bundler) startFlushLocked() {
- if b.flushTimer != nil {
- b.flushTimer.Stop()
- b.flushTimer = nil
- }
- if b.curBundle.items.Len() == 0 {
- return
- }
- // Here, both semaphores must have been initialized.
- bun := b.curBundle
- b.curBundle = bundle{items: b.itemSliceZero}
- ticket := b.nextTicket
- b.nextTicket++
- go func() {
- defer func() {
- b.sem.Release(int64(bun.size))
- b.release(ticket)
- }()
- b.acquire(ticket)
- b.handler(bun.items.Interface())
- }()
-}
-
-// acquire blocks until ticket is the next to be served, then returns. In order for N
-// acquire calls to return, the tickets must be in the range [0, N). A ticket must
-// not be presented to acquire more than once.
-func (b *Bundler) acquire(ticket uint64) {
- b.mu2.Lock()
- defer b.mu2.Unlock()
- if ticket < b.nextHandled {
- panic("bundler: acquire: arg too small")
- }
- for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) {
- b.cond.Wait()
- }
- // Here,
- // ticket == b.nextHandled: the caller is the next one to be handled;
- // and len(b.active) < b.HandlerLimit: there is space available.
- b.active[ticket] = true
- b.nextHandled++
- // Broadcast, not Signal: although at most one acquire waiter can make progress,
- // there might be waiters in waitUntilAllHandled.
- b.cond.Broadcast()
-}
-
-// If a ticket is used for a call to acquire, it must later be passed to release. A
-// ticket must not be presented to release more than once.
-func (b *Bundler) release(ticket uint64) {
- b.mu2.Lock()
- defer b.mu2.Unlock()
- if !b.active[ticket] {
- panic("bundler: release: not an active ticket")
- }
- delete(b.active, ticket)
- b.cond.Broadcast()
-}
-
-// waitUntilAllHandled blocks until all tickets < n have called release, meaning
-// all bundles with tickets < n have been handled.
-func (b *Bundler) waitUntilAllHandled(n uint64) {
- // Proof of correctness of this function.
- // "N is acquired" means acquire(N) has returned.
- // "N is released" means release(N) has returned.
- // 1. If N is acquired, N-1 is acquired.
- // Follows from the loop test in acquire, and the fact
- // that nextHandled is incremented by 1.
- // 2. If nextHandled >= N, then N-1 is acquired.
- // Because we only increment nextHandled to N after N-1 is acquired.
- // 3. If nextHandled >= N, then all n < N is acquired.
- // Follows from #1 and #2.
- // 4. If N is acquired and N is not in active, then N is released.
- // Because we put N in active before acquire returns, and only
- // remove it when it is released.
- // Let min(active) be the smallest member of active, or infinity if active is empty.
- // 5. If nextHandled >= N and N <= min(active), then all n < N is released.
- // From nextHandled >= N and #3, all n < N is acquired.
- // N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active.
- // So from #4, all n < N is released.
- // The loop test below is the antecedent of #5.
- b.mu2.Lock()
- defer b.mu2.Unlock()
- for !(b.nextHandled >= n && n <= min(b.active)) {
- b.cond.Wait()
- }
-}
-
-// min returns the minimum value of the set s, or the largest uint64 if
-// s is empty.
-func min(s map[uint64]bool) uint64 {
- var m uint64 = math.MaxUint64
- for n := range s {
- if n < m {
- m = n
- }
- }
- return m
-}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
deleted file mode 100644
index 26021b0f9..000000000
--- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/api/httpbody.proto
-
-package httpbody // import "google.golang.org/genproto/googleapis/api/httpbody"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import any "github.com/golang/protobuf/ptypes/any"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// Message that represents an arbitrary HTTP body. It should only be used for
-// payload formats that can't be represented as JSON, such as raw binary or
-// an HTML page.
-//
-//
-// This message can be used both in streaming and non-streaming API methods in
-// the request as well as the response.
-//
-// It can be used as a top-level request field, which is convenient if one
-// wants to extract parameters from either the URL or HTTP template into the
-// request fields and also want access to the raw HTTP body.
-//
-// Example:
-//
-// message GetResourceRequest {
-// // A unique request id.
-// string request_id = 1;
-//
-// // The raw HTTP body is bound to this field.
-// google.api.HttpBody http_body = 2;
-// }
-//
-// service ResourceService {
-// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);
-// rpc UpdateResource(google.api.HttpBody) returns
-// (google.protobuf.Empty);
-// }
-//
-// Example with streaming methods:
-//
-// service CaldavService {
-// rpc GetCalendar(stream google.api.HttpBody)
-// returns (stream google.api.HttpBody);
-// rpc UpdateCalendar(stream google.api.HttpBody)
-// returns (stream google.api.HttpBody);
-// }
-//
-// Use of this type only changes how the request and response bodies are
-// handled, all other features will continue to work unchanged.
-type HttpBody struct {
- // The HTTP Content-Type header value specifying the content type of the body.
- ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
- // The HTTP request/response body as raw binary.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- // Application specific response metadata. Must be set in the first response
- // for streaming APIs.
- Extensions []*any.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HttpBody) Reset() { *m = HttpBody{} }
-func (m *HttpBody) String() string { return proto.CompactTextString(m) }
-func (*HttpBody) ProtoMessage() {}
-func (*HttpBody) Descriptor() ([]byte, []int) {
- return fileDescriptor_httpbody_45db50668f1dc1dc, []int{0}
-}
-func (m *HttpBody) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HttpBody.Unmarshal(m, b)
-}
-func (m *HttpBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HttpBody.Marshal(b, m, deterministic)
-}
-func (dst *HttpBody) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HttpBody.Merge(dst, src)
-}
-func (m *HttpBody) XXX_Size() int {
- return xxx_messageInfo_HttpBody.Size(m)
-}
-func (m *HttpBody) XXX_DiscardUnknown() {
- xxx_messageInfo_HttpBody.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HttpBody proto.InternalMessageInfo
-
-func (m *HttpBody) GetContentType() string {
- if m != nil {
- return m.ContentType
- }
- return ""
-}
-
-func (m *HttpBody) GetData() []byte {
- if m != nil {
- return m.Data
- }
- return nil
-}
-
-func (m *HttpBody) GetExtensions() []*any.Any {
- if m != nil {
- return m.Extensions
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*HttpBody)(nil), "google.api.HttpBody")
-}
-
-func init() { proto.RegisterFile("google/api/httpbody.proto", fileDescriptor_httpbody_45db50668f1dc1dc) }
-
-var fileDescriptor_httpbody_45db50668f1dc1dc = []byte{
- // 229 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x31, 0x4f, 0xc3, 0x30,
- 0x10, 0x85, 0xe5, 0xb6, 0x42, 0x70, 0x2d, 0x0c, 0x16, 0x43, 0x60, 0x0a, 0x4c, 0x99, 0x6c, 0x09,
- 0xd8, 0x3a, 0x35, 0x0b, 0xb0, 0x45, 0x11, 0x13, 0x0b, 0x72, 0x1a, 0xe3, 0x46, 0x2a, 0x77, 0xa7,
- 0xe6, 0x10, 0xf8, 0xef, 0xf0, 0x2b, 0x19, 0x11, 0x69, 0x2c, 0xe8, 0xf6, 0xe4, 0xef, 0x3d, 0xbf,
- 0x77, 0x70, 0x11, 0x88, 0xc2, 0xd6, 0x5b, 0xc7, 0x9d, 0xdd, 0x88, 0x70, 0x43, 0x6d, 0x34, 0xbc,
- 0x23, 0x21, 0x0d, 0x7b, 0x64, 0x1c, 0x77, 0x97, 0xc9, 0x36, 0x90, 0xe6, 0xfd, 0xd5, 0x3a, 0x1c,
- 0x6d, 0xd7, 0x1f, 0x70, 0xfc, 0x20, 0xc2, 0x25, 0xb5, 0x51, 0x5f, 0xc1, 0x62, 0x4d, 0x28, 0x1e,
- 0xe5, 0x45, 0x22, 0xfb, 0x4c, 0xe5, 0xaa, 0x38, 0xa9, 0xe7, 0xe3, 0xdb, 0x53, 0x64, 0xaf, 0x35,
- 0xcc, 0x5a, 0x27, 0x2e, 0x9b, 0xe4, 0xaa, 0x58, 0xd4, 0x83, 0xd6, 0x77, 0x00, 0xfe, 0x53, 0x3c,
- 0xf6, 0x1d, 0x61, 0x9f, 0x4d, 0xf3, 0x69, 0x31, 0xbf, 0x39, 0x37, 0x63, 0x7d, 0xaa, 0x34, 0x2b,
- 0x8c, 0xf5, 0x3f, 0x5f, 0xb9, 0x81, 0xb3, 0x35, 0xbd, 0x99, 0xbf, 0x95, 0xe5, 0x69, 0x1a, 0x52,
- 0xfd, 0x66, 0x2a, 0xf5, 0xbc, 0x1c, 0x61, 0xa0, 0xad, 0xc3, 0x60, 0x68, 0x17, 0x6c, 0xf0, 0x38,
- 0xfc, 0x68, 0xf7, 0xc8, 0x71, 0xd7, 0x1f, 0x1c, 0xbf, 0x4c, 0xe2, 0x5b, 0xa9, 0xaf, 0xc9, 0xec,
- 0x7e, 0x55, 0x3d, 0x36, 0x47, 0x43, 0xe2, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x78, 0xb9, 0x16,
- 0x2b, 0x2d, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
deleted file mode 100644
index 86886693f..000000000
--- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/field_mask.proto
-
-package field_mask // import "google.golang.org/genproto/protobuf/field_mask"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// `FieldMask` represents a set of symbolic field paths, for example:
-//
-// paths: "f.a"
-// paths: "f.b.d"
-//
-// Here `f` represents a field in some root message, `a` and `b`
-// fields in the message found in `f`, and `d` a field found in the
-// message in `f.b`.
-//
-// Field masks are used to specify a subset of fields that should be
-// returned by a get operation or modified by an update operation.
-// Field masks also have a custom JSON encoding (see below).
-//
-// # Field Masks in Projections
-//
-// When used in the context of a projection, a response message or
-// sub-message is filtered by the API to only contain those fields as
-// specified in the mask. For example, if the mask in the previous
-// example is applied to a response message as follows:
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// x : 2
-// }
-// y : 13
-// }
-// z: 8
-//
-// The result will not contain specific values for fields x,y and z
-// (their value will be set to the default, and omitted in proto text
-// output):
-//
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// }
-// }
-//
-// A repeated field is not allowed except at the last position of a
-// paths string.
-//
-// If a FieldMask object is not present in a get operation, the
-// operation applies to all fields (as if a FieldMask of all fields
-// had been specified).
-//
-// Note that a field mask does not necessarily apply to the
-// top-level response message. In case of a REST get operation, the
-// field mask applies directly to the response, but in case of a REST
-// list operation, the mask instead applies to each individual message
-// in the returned resource list. In case of a REST custom method,
-// other definitions may be used. Where the mask applies will be
-// clearly documented together with its declaration in the API. In
-// any case, the effect on the returned resource/resources is required
-// behavior for APIs.
-//
-// # Field Masks in Update Operations
-//
-// A field mask in update operations specifies which fields of the
-// targeted resource are going to be updated. The API is required
-// to only change the values of the fields as specified in the mask
-// and leave the others untouched. If a resource is passed in to
-// describe the updated values, the API ignores the values of all
-// fields not covered by the mask.
-//
-// If a repeated field is specified for an update operation, new values will
-// be appended to the existing repeated field in the target resource. Note that
-// a repeated field is only allowed in the last position of a `paths` string.
-//
-// If a sub-message is specified in the last position of the field mask for an
-// update operation, then new value will be merged into the existing sub-message
-// in the target resource.
-//
-// For example, given the target message:
-//
-// f {
-// b {
-// d: 1
-// x: 2
-// }
-// c: [1]
-// }
-//
-// And an update message:
-//
-// f {
-// b {
-// d: 10
-// }
-// c: [2]
-// }
-//
-// then if the field mask is:
-//
-// paths: ["f.b", "f.c"]
-//
-// then the result will be:
-//
-// f {
-// b {
-// d: 10
-// x: 2
-// }
-// c: [1, 2]
-// }
-//
-// An implementation may provide options to override this default behavior for
-// repeated and message fields.
-//
-// In order to reset a field's value to the default, the field must
-// be in the mask and set to the default value in the provided resource.
-// Hence, in order to reset all fields of a resource, provide a default
-// instance of the resource and set all fields in the mask, or do
-// not provide a mask as described below.
-//
-// If a field mask is not present on update, the operation applies to
-// all fields (as if a field mask of all fields has been specified).
-// Note that in the presence of schema evolution, this may mean that
-// fields the client does not know and has therefore not filled into
-// the request will be reset to their default. If this is unwanted
-// behavior, a specific service may require a client to always specify
-// a field mask, producing an error if not.
-//
-// As with get operations, the location of the resource which
-// describes the updated values in the request message depends on the
-// operation kind. In any case, the effect of the field mask is
-// required to be honored by the API.
-//
-// ## Considerations for HTTP REST
-//
-// The HTTP kind of an update operation which uses a field mask must
-// be set to PATCH instead of PUT in order to satisfy HTTP semantics
-// (PUT must only be used for full updates).
-//
-// # JSON Encoding of Field Masks
-//
-// In JSON, a field mask is encoded as a single string where paths are
-// separated by a comma. Fields name in each path are converted
-// to/from lower-camel naming conventions.
-//
-// As an example, consider the following message declarations:
-//
-// message Profile {
-// User user = 1;
-// Photo photo = 2;
-// }
-// message User {
-// string display_name = 1;
-// string address = 2;
-// }
-//
-// In proto a field mask for `Profile` may look as such:
-//
-// mask {
-// paths: "user.display_name"
-// paths: "photo"
-// }
-//
-// In JSON, the same mask is represented as below:
-//
-// {
-// mask: "user.displayName,photo"
-// }
-//
-// # Field Masks and Oneof Fields
-//
-// Field masks treat fields in oneofs just as regular fields. Consider the
-// following message:
-//
-// message SampleMessage {
-// oneof test_oneof {
-// string name = 4;
-// SubMessage sub_message = 9;
-// }
-// }
-//
-// The field mask can be:
-//
-// mask {
-// paths: "name"
-// }
-//
-// Or:
-//
-// mask {
-// paths: "sub_message"
-// }
-//
-// Note that oneof type names ("test_oneof" in this case) cannot be used in
-// paths.
-//
-// ## Field Mask Verification
-//
-// The implementation of any API method which has a FieldMask type field in the
-// request should verify the included field paths, and return an
-// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
-type FieldMask struct {
- // The set of field mask paths.
- Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FieldMask) Reset() { *m = FieldMask{} }
-func (m *FieldMask) String() string { return proto.CompactTextString(m) }
-func (*FieldMask) ProtoMessage() {}
-func (*FieldMask) Descriptor() ([]byte, []int) {
- return fileDescriptor_field_mask_02a8b0c0831edcce, []int{0}
-}
-func (m *FieldMask) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FieldMask.Unmarshal(m, b)
-}
-func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic)
-}
-func (dst *FieldMask) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldMask.Merge(dst, src)
-}
-func (m *FieldMask) XXX_Size() int {
- return xxx_messageInfo_FieldMask.Size(m)
-}
-func (m *FieldMask) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldMask.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldMask proto.InternalMessageInfo
-
-func (m *FieldMask) GetPaths() []string {
- if m != nil {
- return m.Paths
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
-}
-
-func init() {
- proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_field_mask_02a8b0c0831edcce)
-}
-
-var fileDescriptor_field_mask_02a8b0c0831edcce = []byte{
- // 175 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
- 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
- 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
- 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c,
- 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01,
- 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa,
- 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4,
- 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a,
- 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24,
- 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e1f24eb19..a4d0e2d0b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -4,27 +4,22 @@ bazil.org/fuse/fs
bazil.org/fuse/fuseutil
# cloud.google.com/go v0.37.4
cloud.google.com/go/compute/metadata
-# contrib.go.opencensus.io/exporter/ocagent v0.4.12
-contrib.go.opencensus.io/exporter/ocagent
# github.com/Azure/azure-sdk-for-go v27.3.0+incompatible
github.com/Azure/azure-sdk-for-go/storage
github.com/Azure/azure-sdk-for-go/version
-# github.com/Azure/go-autorest v12.0.0+incompatible
+# github.com/Azure/go-autorest/autorest v0.9.2
github.com/Azure/go-autorest/autorest
github.com/Azure/go-autorest/autorest/azure
+# github.com/Azure/go-autorest/autorest/adal v0.5.0
github.com/Azure/go-autorest/autorest/adal
-github.com/Azure/go-autorest/logger
-github.com/Azure/go-autorest/tracing
+# github.com/Azure/go-autorest/autorest/date v0.1.0
github.com/Azure/go-autorest/autorest/date
+# github.com/Azure/go-autorest/logger v0.1.0
+github.com/Azure/go-autorest/logger
+# github.com/Azure/go-autorest/tracing v0.5.0
+github.com/Azure/go-autorest/tracing
# github.com/cenkalti/backoff v2.1.1+incompatible
github.com/cenkalti/backoff
-# github.com/census-instrumentation/opencensus-proto v0.2.0
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1
# github.com/cpuguy83/go-md2man v1.0.10
github.com/cpuguy83/go-md2man/md2man
# github.com/dgrijalva/jwt-go v3.2.0+incompatible
@@ -35,27 +30,16 @@ github.com/elithrar/simple-scrypt
github.com/go-ini/ini
# github.com/golang/protobuf v1.3.1
github.com/golang/protobuf/proto
-github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes
-github.com/golang/protobuf/ptypes/wrappers
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
-github.com/golang/protobuf/jsonpb
-github.com/golang/protobuf/protoc-gen-go/generator
-github.com/golang/protobuf/ptypes/struct
-github.com/golang/protobuf/protoc-gen-go/descriptor
-github.com/golang/protobuf/protoc-gen-go/generator/internal/remap
-github.com/golang/protobuf/protoc-gen-go/plugin
+github.com/golang/protobuf/ptypes/timestamp
# github.com/google/go-cmp v0.2.0
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/cmpopts
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
-# github.com/grpc-ecosystem/grpc-gateway v1.8.5
-github.com/grpc-ecosystem/grpc-gateway/runtime
-github.com/grpc-ecosystem/grpc-gateway/utilities
-github.com/grpc-ecosystem/grpc-gateway/internal
# github.com/hashicorp/golang-lru v0.5.1
github.com/hashicorp/golang-lru/simplelru
# github.com/inconshreveable/mousetrap v1.0.0
@@ -68,9 +52,9 @@ github.com/kr/fs
github.com/kurin/blazer/b2
github.com/kurin/blazer/base
github.com/kurin/blazer/internal/b2assets
+github.com/kurin/blazer/internal/b2types
github.com/kurin/blazer/internal/blog
github.com/kurin/blazer/x/window
-github.com/kurin/blazer/internal/b2types
# github.com/marstr/guid v1.1.0
github.com/marstr/guid
# github.com/mattn/go-isatty v0.0.7
@@ -106,54 +90,52 @@ github.com/spf13/cobra/doc
# github.com/spf13/pflag v1.0.3
github.com/spf13/pflag
# go.opencensus.io v0.20.2
-go.opencensus.io/plugin/ochttp
-go.opencensus.io/plugin/ochttp/propagation/tracecontext
-go.opencensus.io/stats/view
-go.opencensus.io/trace
-go.opencensus.io/plugin/ochttp/propagation/b3
-go.opencensus.io/stats
-go.opencensus.io/tag
-go.opencensus.io/trace/propagation
go.opencensus.io
-go.opencensus.io/plugin/ocgrpc
-go.opencensus.io/resource
-go.opencensus.io/trace/tracestate
+go.opencensus.io/internal
go.opencensus.io/internal/tagencoding
go.opencensus.io/metric/metricdata
go.opencensus.io/metric/metricproducer
+go.opencensus.io/plugin/ochttp
+go.opencensus.io/plugin/ochttp/propagation/b3
+go.opencensus.io/resource
+go.opencensus.io/stats
go.opencensus.io/stats/internal
-go.opencensus.io/internal
+go.opencensus.io/stats/view
+go.opencensus.io/tag
+go.opencensus.io/trace
go.opencensus.io/trace/internal
+go.opencensus.io/trace/propagation
+go.opencensus.io/trace/tracestate
# golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd
-golang.org/x/crypto/ssh/terminal
-golang.org/x/crypto/poly1305
-golang.org/x/crypto/scrypt
+golang.org/x/crypto/argon2
+golang.org/x/crypto/blake2b
+golang.org/x/crypto/cast5
+golang.org/x/crypto/curve25519
+golang.org/x/crypto/ed25519
+golang.org/x/crypto/ed25519/internal/edwards25519
+golang.org/x/crypto/internal/chacha20
+golang.org/x/crypto/internal/subtle
golang.org/x/crypto/openpgp
-golang.org/x/crypto/ssh
-golang.org/x/crypto/pbkdf2
golang.org/x/crypto/openpgp/armor
+golang.org/x/crypto/openpgp/elgamal
golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
-golang.org/x/crypto/argon2
-golang.org/x/crypto/curve25519
-golang.org/x/crypto/ed25519
-golang.org/x/crypto/internal/chacha20
-golang.org/x/crypto/cast5
-golang.org/x/crypto/openpgp/elgamal
-golang.org/x/crypto/blake2b
-golang.org/x/crypto/ed25519/internal/edwards25519
-golang.org/x/crypto/internal/subtle
+golang.org/x/crypto/pbkdf2
+golang.org/x/crypto/poly1305
+golang.org/x/crypto/scrypt
+golang.org/x/crypto/ssh
+golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20190424024845-afe8014c977f
-golang.org/x/net/context/ctxhttp
-golang.org/x/net/http2
golang.org/x/net/context
+golang.org/x/net/context/ctxhttp
golang.org/x/net/http/httpguts
+golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
+golang.org/x/net/internal/timeseries
golang.org/x/net/publicsuffix
golang.org/x/net/trace
-golang.org/x/net/internal/timeseries
# golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a
golang.org/x/oauth2
golang.org/x/oauth2/google
@@ -162,56 +144,54 @@ golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sync/errgroup
-golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+golang.org/x/sys/cpu
golang.org/x/sys/unix
golang.org/x/sys/windows
-golang.org/x/sys/cpu
# golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2
-golang.org/x/text/encoding/unicode
golang.org/x/text/encoding
golang.org/x/text/encoding/internal
golang.org/x/text/encoding/internal/identifier
+golang.org/x/text/encoding/unicode
golang.org/x/text/internal/utf8internal
golang.org/x/text/runes
-golang.org/x/text/transform
golang.org/x/text/secure/bidirule
+golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# google.golang.org/api v0.3.2
-google.golang.org/api/googleapi
-google.golang.org/api/storage/v1
-google.golang.org/api/googleapi/internal/uritemplates
google.golang.org/api/gensupport
-google.golang.org/api/option
-google.golang.org/api/transport/http
-google.golang.org/api/internal
+google.golang.org/api/googleapi
+google.golang.org/api/googleapi/internal/uritemplates
google.golang.org/api/googleapi/transport
+google.golang.org/api/internal
+google.golang.org/api/option
+google.golang.org/api/storage/v1
+google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
-google.golang.org/api/support/bundler
# google.golang.org/appengine v1.5.0
google.golang.org/appengine
-google.golang.org/appengine/urlfetch
google.golang.org/appengine/internal
google.golang.org/appengine/internal/app_identity
-google.golang.org/appengine/internal/modules
-google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/internal/base
google.golang.org/appengine/internal/datastore
google.golang.org/appengine/internal/log
+google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
+google.golang.org/appengine/internal/urlfetch
+google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7
google.golang.org/genproto/googleapis/rpc/status
-google.golang.org/genproto/googleapis/api/httpbody
-google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.20.1
google.golang.org/grpc
-google.golang.org/grpc/naming
google.golang.org/grpc/balancer
+google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/roundrobin
+google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials
+google.golang.org/grpc/credentials/internal
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/grpclog
@@ -223,9 +203,11 @@ google.golang.org/grpc/internal/channelz
google.golang.org/grpc/internal/envconfig
google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
google.golang.org/grpc/keepalive
google.golang.org/grpc/metadata
+google.golang.org/grpc/naming
google.golang.org/grpc/peer
google.golang.org/grpc/resolver
google.golang.org/grpc/resolver/dns
@@ -233,10 +215,6 @@ google.golang.org/grpc/resolver/passthrough
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-google.golang.org/grpc/balancer/base
-google.golang.org/grpc/credentials/internal
-google.golang.org/grpc/binarylog/grpc_binarylog_v1
-google.golang.org/grpc/internal/syscall
# gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637
gopkg.in/tomb.v2
# gopkg.in/yaml.v2 v2.2.2