forked from TrueCloudLab/restic
Update dependencies
Exclude minio-go for now (pin to 3.x.y).
parent 9d0f13c4c0
commit 946c8399e2
2985 changed files with 1008107 additions and 118934 deletions
Gopkg.lock | 38 changes (generated)

@@ -10,20 +10,20 @@
 [[projects]]
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
-  revision = "eaddaf6dd7ee35fd3c2420c8d27478db176b0485"
-  version = "v0.15.0"
+  revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
+  version = "v0.16.0"

 [[projects]]
   name = "github.com/Azure/azure-sdk-for-go"
   packages = ["storage"]
-  revision = "509eea43b93cec2f3f17acbe2578ef58703923f8"
-  version = "v11.1.1-beta"
+  revision = "7692b0cef22674113fcf71cc17ac3ccc1a7fef48"
+  version = "v11.2.2-beta"

 [[projects]]
   name = "github.com/Azure/go-autorest"
   packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
-  revision = "7aa5b8a6f18b5c15910c767ab005fc4585221177"
-  version = "v9.1.1"
+  revision = "c67b24a8e30d876542a85022ebbdecf0e5a935e8"
+  version = "v9.4.1"

 [[projects]]
   name = "github.com/cenkalti/backoff"

@@ -47,7 +47,7 @@
   branch = "master"
   name = "github.com/dustin/go-humanize"
   packages = ["."]
-  revision = "77ed807830b4df581417e7f89eb81d4872832b72"
+  revision = "bb3d318650d48840a39aa21a027c6630e198e626"

 [[projects]]
   name = "github.com/elithrar/simple-scrypt"

@@ -58,14 +58,14 @@
 [[projects]]
   name = "github.com/go-ini/ini"
   packages = ["."]
-  revision = "5b3e00af70a9484542169a976dcab8d03e601a17"
-  version = "v1.30.0"
+  revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
+  version = "v1.32.0"

 [[projects]]
   branch = "master"
   name = "github.com/golang/protobuf"
   packages = ["proto"]
-  revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
+  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"

 [[projects]]
   name = "github.com/inconshreveable/mousetrap"

@@ -77,7 +77,7 @@
   branch = "master"
   name = "github.com/juju/ratelimit"
   packages = ["."]
-  revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
+  revision = "59fac5042749a5afb9af70e813da1dd5474f0167"

 [[projects]]
   branch = "master"

@@ -158,10 +158,10 @@
   version = "v1.0.3"

 [[projects]]
-  branch = "master"
   name = "github.com/spf13/cobra"
   packages = [".","doc"]
   revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
+  version = "v0.0.1"

 [[projects]]
   name = "github.com/spf13/pflag"

@@ -173,31 +173,31 @@
   branch = "master"
   name = "golang.org/x/crypto"
   packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"]
-  revision = "edd5e9b0879d13ee6970a50153d85b8fec9f7686"
+  revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"

 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
   packages = ["context","context/ctxhttp"]
-  revision = "cd69bc3fc700721b709c3a59e16e24c67b58f6ff"
+  revision = "a8b9294777976932365dabb6640cf1468d95c70f"

 [[projects]]
   branch = "master"
   name = "golang.org/x/oauth2"
   packages = [".","google","internal","jws","jwt"]
-  revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"
+  revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"

 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
   packages = ["unix","windows"]
-  revision = "8dbc5d05d6edcc104950cc299a1ce6641235bc86"
+  revision = "8b4580aae2a0dd0c231a45d3ccb8434ff533b840"

 [[projects]]
   branch = "master"
   name = "google.golang.org/api"
   packages = ["gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
-  revision = "7afc123cf726cd2f253faa3e144d2ab65477b18f"
+  revision = "3a1d936b7575b82197a1fea0632218dd07b1e65c"

 [[projects]]
   name = "google.golang.org/appengine"

@@ -209,11 +209,11 @@
   branch = "v2"
   name = "gopkg.in/yaml.v2"
   packages = ["."]
-  revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
+  revision = "287cf08546ab5e7e37d55a84f7ed3fd1db036de5"

 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "f0a207197cb502238ac87ca8e07b2640c02ec380a50b036e09ef87e40e31ca2d"
+  inputs-digest = "c6e2522d1b0c6832101ba15fc062074ad790648e26f481e3419a171d3579bfc4"
   solver-name = "gps-cdcl"
   solver-version = 1
Gopkg.toml

@@ -19,3 +19,7 @@
 # [[override]]
 #  name = "github.com/x/y"
 #  version = "2.4.0"
+
+[[constraint]]
+  name = "github.com/minio/minio-go"
+  version = "3.0.0"
vendor/cloud.google.com/go/.travis.yml | 9 changes (generated, vendored)

@@ -4,11 +4,16 @@ go:
 - 1.6
 - 1.7
 - 1.8
+- 1.9
 install:
 - go get -v cloud.google.com/go/...
 script:
-- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in key.json.enc -out key.json -d
-- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
+- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d
+- tar xvf keys.tar
+- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
+  GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
+  GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
+  GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
   ./run-tests.sh $TRAVIS_COMMIT
 env:
   matrix:
vendor/cloud.google.com/go/CONTRIBUTING.md | 20 changes (generated, vendored)

@@ -31,9 +31,12 @@ To run the integrations tests, creating and configuration of a project in the
 Google Developers Console is required.

 After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
-Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project)
-(or **Editor** and **Logs Configuration Writer** roles) are added to the
-service account.
+Ensure the project-level **Owner**
+[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the
+service account. Alternatively, the account can be granted all of the following roles:
+- **Editor**
+- **Logs Configuration Writer**
+- **PubSub Admin**

 Once you create a project, set the following environment variables to be able to
 run the against the actual APIs.

@@ -69,10 +72,15 @@ $ gcloud preview datastore create-indexes datastore/testdata/index.yaml
 $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
 $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID

+# Create a PubSub topic for integration tests of storage notifications.
+$ gcloud beta pubsub topics create go-storage-notification-test
+
+# Create a Spanner instance for the spanner integration tests.
+$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
+# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
+# the instance after testing with 'gcloud beta spanner instances delete'.
+
+
 ```

 Once you've set the environment variables, you can run the integration tests by

@@ -88,9 +96,9 @@ Before we can accept your pull requests you'll need to sign a Contributor
 License Agreement (CLA):

 - **If you are an individual writing original source code** and **you own the
-intellectual property**, then you'll need to sign an [individual CLA][indvcla].
-- **If you work for a company that wants to allow you to contribute your work**,
-then you'll need to sign a [corporate CLA][corpcla].
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your
+work**, then you'll need to sign a [corporate CLA][corpcla].

 You can sign these electronically (just scroll to the bottom). After that,
 we'll be able to accept your pull requests.
vendor/cloud.google.com/go/README.md | 29 changes (generated, vendored)

@@ -33,10 +33,37 @@ make backwards-incompatible changes.

 ## News

-*v0.15.0*
+_October 30, 2017_
+
+*v0.16.0*
+
+- Other bigquery changes:
+  - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
+  - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
+    Legacy SQL.
+  - Uploader.Put will generate a random insert ID if you do not provide one.
+  - Support time partitioning for load jobs.
+  - Support dry-run queries.
+  - A `Job` remembers its last retrieved status.
+  - Support retrieving job configuration.
+  - Support labels for jobs and tables.
+  - Support dataset access lists.
+  - Improve support for external data sources, including data from Bigtable and
+    Google Sheets, and tables with external data.
+  - Support updating a table's view configuration.
+  - Fix uploading civil times with nanoseconds.
+
+- storage:
+  - Support PubSub notifications.
+  - Support Requester Pays buckets.
+
+- profiler: Support goroutine and mutex profile types.
+
+
+_October 3, 2017_
+
+*v0.15.0*

 - firestore: beta release. See the
 [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).
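The `JobIterator.Next` note above is the one breaking API change in this bump, so a short migration sketch may help. This is illustrative only, not code from the commit; it assumes a reachable project and borrows `client.Jobs`, `bigquery.Running`, and `job.LastStatus` from the `ExampleClient_Jobs` and `ExampleClient_JobFromID` diffs further down.

    package main

    import (
        "fmt"
        "log"

        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
        "google.golang.org/api/iterator"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id") // placeholder project ID
        if err != nil {
            log.Fatal(err)
        }
        it := client.Jobs(ctx)
        it.State = bigquery.Running // list only running jobs
        for {
            job, err := it.Next() // returns *bigquery.Job as of v0.16.0; JobInfo is gone
            if err == iterator.Done {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(job.LastStatus()) // a Job now remembers its last retrieved status
        }
    }

Callers that previously read fields off the removed JobInfo now use the *Job methods (e.g. LastStatus) instead.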
vendor/cloud.google.com/go/bigquery/benchmarks/README.md | 8 additions (generated, vendored, new file)

@@ -0,0 +1,8 @@
+# BigQuery Benchmark
+This directory contains benchmarks for BigQuery client.
+
+## Usage
+`go run bench.go -- <your project id> queries.json`
+
+BigQuery service caches requests so the benchmark should be run
+at least twice, disregarding the first result.
vendor/cloud.google.com/go/bigquery/benchmarks/bench.go | 86 additions (generated, vendored, new file)

@@ -0,0 +1,86 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//+build ignore
+
+package main
+
+import (
+    "context"
+    "encoding/json"
+    "flag"
+    "io/ioutil"
+    "log"
+    "time"
+
+    "google.golang.org/api/iterator"
+
+    "cloud.google.com/go/bigquery"
+)
+
+func main() {
+    flag.Parse()
+
+    ctx := context.Background()
+    c, err := bigquery.NewClient(ctx, flag.Arg(0))
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    var queries []string
+    if err := json.Unmarshal(queriesJSON, &queries); err != nil {
+        log.Fatal(err)
+    }
+
+    for _, q := range queries {
+        doQuery(ctx, c, q)
+    }
+}
+
+func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
+    startTime := time.Now()
+    q := c.Query(qt)
+    it, err := q.Read(ctx)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    numRows, numCols := 0, 0
+    var firstByte time.Duration
+
+    for {
+        var values []bigquery.Value
+        err := it.Next(&values)
+        if err == iterator.Done {
+            break
+        }
+        if err != nil {
+            log.Fatal(err)
+        }
+        if numRows == 0 {
+            numCols = len(values)
+            firstByte = time.Since(startTime)
+        } else if numCols != len(values) {
+            log.Fatalf("got %d columns, want %d", len(values), numCols)
+        }
+        numRows++
+    }
+    log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
+        qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
+}
vendor/cloud.google.com/go/bigquery/benchmarks/queries.json | 10 additions (generated, vendored, new file)

@@ -0,0 +1,10 @@
+[
+"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
+"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
+"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000",
+"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000",
+"SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id",
+"SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId",
+"SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000",
+"SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000"
+]
vendor/cloud.google.com/go/bigquery/bigquery.go | 111 changes (generated, vendored)

@@ -14,11 +14,18 @@

 package bigquery

-// TODO(mcgreevy): support dry-run mode when creating jobs.
-
 import (
     "fmt"
+    "io"
     "net/http"
     "time"

+    gax "github.com/googleapis/gax-go"
+
+    "cloud.google.com/go/internal"
     "cloud.google.com/go/internal/version"

+    "google.golang.org/api/googleapi"
     "google.golang.org/api/option"
     htransport "google.golang.org/api/transport/http"

@@ -26,20 +33,22 @@ import (
     bq "google.golang.org/api/bigquery/v2"
 )

-const prodAddr = "https://www.googleapis.com/bigquery/v2/"
+const (
+    prodAddr  = "https://www.googleapis.com/bigquery/v2/"
+    Scope     = "https://www.googleapis.com/auth/bigquery"
+    userAgent = "gcloud-golang-bigquery/20160429"
+)

-// ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference.
-type ExternalData interface {
-    externalDataConfig() bq.ExternalDataConfiguration
-}
+var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

-const Scope = "https://www.googleapis.com/auth/bigquery"
-const userAgent = "gcloud-golang-bigquery/20160429"
+func setClientHeader(headers http.Header) {
+    headers.Set("x-goog-api-client", xGoogHeader)
+}

 // Client may be used to perform BigQuery operations.
 type Client struct {
-    service   service
     projectID string
+    bqs       *bq.Service
 }

 // NewClient constructs a new Client which can perform BigQuery operations.

@@ -53,17 +62,16 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
     o = append(o, opts...)
     httpClient, endpoint, err := htransport.NewClient(ctx, o...)
     if err != nil {
-        return nil, fmt.Errorf("dialing: %v", err)
+        return nil, fmt.Errorf("bigquery: dialing: %v", err)
     }
-
-    s, err := newBigqueryService(httpClient, endpoint)
+    bqs, err := bq.New(httpClient)
     if err != nil {
-        return nil, fmt.Errorf("constructing bigquery client: %v", err)
+        return nil, fmt.Errorf("bigquery: constructing client: %v", err)
     }
-
+    bqs.BasePath = endpoint
     c := &Client{
-        service:   s,
         projectID: projectID,
+        bqs:       bqs,
     }
     return c, nil
 }

@@ -75,11 +83,74 @@ func (c *Client) Close() error {
     return nil
 }

-func (c *Client) insertJob(ctx context.Context, conf *insertJobConf) (*Job, error) {
-    job, err := c.service.insertJob(ctx, c.projectID, conf)
+// Calls the Jobs.Insert RPC and returns a Job.
+func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
+    call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
+    setClientHeader(call.Header())
+    if media != nil {
+        call.Media(media)
+    }
+    var res *bq.Job
+    var err error
+    invoke := func() error {
+        res, err = call.Do()
+        return err
+    }
+    // A job with a client-generated ID can be retried; the presence of the
+    // ID makes the insert operation idempotent.
+    // We don't retry if there is media, because it is an io.Reader. We'd
+    // have to read the contents and keep it in memory, and that could be expensive.
+    // TODO(jba): Look into retrying if media != nil.
+    if job.JobReference != nil && media == nil {
+        err = runWithRetry(ctx, invoke)
+    } else {
+        err = invoke()
+    }
     if err != nil {
         return nil, err
     }
-    job.c = c
-    return job, nil
+    return bqToJob(res, c)
 }
+
+// Convert a number of milliseconds since the Unix epoch to a time.Time.
+// Treat an input of zero specially: convert it to the zero time,
+// rather than the start of the epoch.
+func unixMillisToTime(m int64) time.Time {
+    if m == 0 {
+        return time.Time{}
+    }
+    return time.Unix(0, m*1e6)
+}
+
+// runWithRetry calls the function until it returns nil or a non-retryable error, or
+// the context is done.
+// See the similar function in ../storage/invoke.go. The main difference is the
+// reason for retrying.
+func runWithRetry(ctx context.Context, call func() error) error {
+    // These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
+    backoff := gax.Backoff{
+        Initial:    1 * time.Second,
+        Max:        32 * time.Second,
+        Multiplier: 2,
+    }
+    return internal.Retry(ctx, backoff, func() (stop bool, err error) {
+        err = call()
+        if err == nil {
+            return true, nil
+        }
+        return !retryableError(err), err
+    })
+}
+
+// This is the correct definition of retryable according to the BigQuery team.
+func retryableError(err error) bool {
+    e, ok := err.(*googleapi.Error)
+    if !ok {
+        return false
+    }
+    var reason string
+    if len(e.Errors) > 0 {
+        reason = e.Errors[0].Reason
+    }
+    return reason == "backendError" || reason == "rateLimitExceeded"
+}
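The retry policy above is easy to misread in diff form, so here is a self-contained sketch of the same capped exponential backoff (Initial 1s, Max 32s, Multiplier 2). It is an illustration with hypothetical names, not the vendored gax/internal.Retry implementation, and it omits the jitter a production backoff would add.

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // retrySketch keeps calling call until it succeeds, returns a non-retryable
    // error, or ctx is done, sleeping 1s, 2s, 4s, ... capped at 32s in between.
    func retrySketch(ctx context.Context, call func() error, retryable func(error) bool) error {
        delay := 1 * time.Second // gax.Backoff Initial
        const max = 32 * time.Second
        for {
            err := call()
            if err == nil {
                return nil
            }
            if !retryable(err) {
                return err
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(delay):
            }
            if delay *= 2; delay > max { // Multiplier 2, capped at Max
                delay = max
            }
        }
    }

    func main() {
        n := 0
        err := retrySketch(context.Background(), func() error {
            n++
            if n < 3 {
                return errors.New("backendError") // pretend transient failure
            }
            return nil
        }, func(error) bool { return true })
        fmt.Println(n, err) // 3 <nil>
    }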
vendor/cloud.google.com/go/bigquery/copy.go | 64 changes (generated, vendored)

@@ -21,12 +21,6 @@ import (

 // CopyConfig holds the configuration for a copy job.
 type CopyConfig struct {
-    // JobID is the ID to use for the job. If empty, a random job ID will be generated.
-    JobID string
-
-    // If AddJobIDSuffix is true, then a random string will be appended to JobID.
-    AddJobIDSuffix bool
-
     // Srcs are the tables from which data will be copied.
     Srcs []*Table

@@ -38,18 +32,51 @@ type CopyConfig struct {
     CreateDisposition TableCreateDisposition

     // WriteDisposition specifies how existing data in the destination table is treated.
-    // The default is WriteAppend.
+    // The default is WriteEmpty.
     WriteDisposition TableWriteDisposition
+
+    // The labels associated with this job.
+    Labels map[string]string
 }

+func (c *CopyConfig) toBQ() *bq.JobConfiguration {
+    var ts []*bq.TableReference
+    for _, t := range c.Srcs {
+        ts = append(ts, t.toBQ())
+    }
+    return &bq.JobConfiguration{
+        Labels: c.Labels,
+        Copy: &bq.JobConfigurationTableCopy{
+            CreateDisposition: string(c.CreateDisposition),
+            WriteDisposition:  string(c.WriteDisposition),
+            DestinationTable:  c.Dst.toBQ(),
+            SourceTables:      ts,
+        },
+    }
+}
+
+func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
+    cc := &CopyConfig{
+        Labels:            q.Labels,
+        CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
+        WriteDisposition:  TableWriteDisposition(q.Copy.WriteDisposition),
+        Dst:               bqToTable(q.Copy.DestinationTable, c),
+    }
+    for _, t := range q.Copy.SourceTables {
+        cc.Srcs = append(cc.Srcs, bqToTable(t, c))
+    }
+    return cc
+}
+
 // A Copier copies data into a BigQuery table from one or more BigQuery tables.
 type Copier struct {
+    JobIDConfig
     CopyConfig
     c *Client
 }

 // CopierFrom returns a Copier which can be used to copy data into a
-// BigQuery table from one or more BigQuery tables.
+// BigQuery table from one or more BigQuery tables.
 // The returned Copier may optionally be further configured before its Run method is called.
 func (t *Table) CopierFrom(srcs ...*Table) *Copier {
     return &Copier{

@@ -63,17 +90,12 @@ func (t *Table) CopierFrom(srcs ...*Table) *Copier {

 // Run initiates a copy job.
 func (c *Copier) Run(ctx context.Context) (*Job, error) {
-    conf := &bq.JobConfigurationTableCopy{
-        CreateDisposition: string(c.CreateDisposition),
-        WriteDisposition:  string(c.WriteDisposition),
-        DestinationTable:  c.Dst.tableRefProto(),
-    }
-    for _, t := range c.Srcs {
-        conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
-    }
-    job := &bq.Job{
-        JobReference:  createJobRef(c.JobID, c.AddJobIDSuffix, c.c.projectID),
-        Configuration: &bq.JobConfiguration{Copy: conf},
-    }
-    return c.c.insertJob(ctx, &insertJobConf{job: job})
+    return c.c.insertJob(ctx, c.newJob(), nil)
+}
+
+func (c *Copier) newJob() *bq.Job {
+    return &bq.Job{
+        JobReference:  c.JobIDConfig.createJobRef(c.c.projectID),
+        Configuration: c.CopyConfig.toBQ(),
+    }
 }
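To see the new Copier pieces above end to end, here is a hedged usage sketch. It mirrors the ExampleJob_Config code later in this commit; "my_dataset", "t1", and "t2" are placeholder names, and the Labels line exercises the field this revision adds to CopyConfig.

    package main

    import (
        "log"

        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id") // placeholder project ID
        if err != nil {
            log.Fatal(err)
        }
        ds := client.Dataset("my_dataset")
        copier := ds.Table("t1").CopierFrom(ds.Table("t2"))
        copier.WriteDisposition = bigquery.WriteTruncate            // as in copy_test.go below
        copier.Labels = map[string]string{"origin": "copy-example"} // new in this revision
        // Run assembles the bq.Job via newJob and submits it through Client.insertJob.
        job, err := copier.Run(ctx)
        if err != nil {
            log.Fatal(err)
        }
        if _, err := job.Wait(ctx); err != nil { // Wait is shown in ExampleJob_Wait below
            log.Fatal(err)
        }
    }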
vendor/cloud.google.com/go/bigquery/copy_test.go | 35 changes (generated, vendored)

@@ -17,7 +17,10 @@ package bigquery

 import (
     "testing"

-    "golang.org/x/net/context"
+    "github.com/google/go-cmp/cmp/cmpopts"
+
+    "cloud.google.com/go/internal/testutil"

     bq "google.golang.org/api/bigquery/v2"
 )

@@ -44,10 +47,11 @@ func defaultCopyJob() *bq.Job {
 }

 func TestCopy(t *testing.T) {
-    defer fixRandomJobID("RANDOM")()
+    defer fixRandomID("RANDOM")()
     testCases := []struct {
         dst    *Table
         srcs   []*Table
+        jobID  string
         config CopyConfig
         want   *bq.Job
     }{

@@ -82,9 +86,11 @@
             config: CopyConfig{
                 CreateDisposition: CreateNever,
                 WriteDisposition:  WriteTruncate,
+                Labels:            map[string]string{"a": "b"},
             },
             want: func() *bq.Job {
                 j := defaultCopyJob()
+                j.Configuration.Labels = map[string]string{"a": "b"}
                 j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
                 j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
                 return j

@@ -103,7 +109,7 @@
                     TableID:   "s-table-id",
                 },
             },
-            config: CopyConfig{JobID: "job-id"},
+            jobID: "job-id",
             want: func() *bq.Job {
                 j := defaultCopyJob()
                 j.JobReference.JobId = "job-id"

@@ -111,22 +117,25 @@
         }(),
         },
     }

+    c := &Client{projectID: "client-project-id"}
     for i, tc := range testCases {
-        s := &testService{}
-        c := &Client{
-            service:   s,
-            projectID: "client-project-id",
-        }
         tc.dst.c = c
         copier := tc.dst.CopierFrom(tc.srcs...)
+        copier.JobID = tc.jobID
         tc.config.Srcs = tc.srcs
         tc.config.Dst = tc.dst
        copier.CopyConfig = tc.config
-        if _, err := copier.Run(context.Background()); err != nil {
-            t.Errorf("#%d: err calling Run: %v", i, err)
-            continue
-        }
+        got := copier.newJob()
+        checkJob(t, i, got, tc.want)
+
+        jc, err := bqToJobConfig(got.Configuration, c)
+        if err != nil {
+            t.Fatalf("#%d: %v", i, err)
+        }
+        diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
+            cmpopts.IgnoreUnexported(Table{}))
+        if diff != "" {
+            t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
+        }
-        checkJob(t, i, s.Job, tc.want)
     }
 }
vendor/cloud.google.com/go/bigquery/dataset.go | 325 changes (generated, vendored)

@@ -15,11 +15,14 @@
 package bigquery

 import (
+    "errors"
+    "fmt"
     "time"

     "cloud.google.com/go/internal/optional"
+
     "golang.org/x/net/context"
     bq "google.golang.org/api/bigquery/v2"
     "google.golang.org/api/iterator"
 )

@@ -38,6 +41,7 @@ type DatasetMetadata struct {
     Location               string            // The geo location of the dataset.
     DefaultTableExpiration time.Duration     // The default expiration time for new tables.
     Labels                 map[string]string // User-provided labels.
+    Access                 []*AccessEntry    // Access permissions.

     // These fields are read-only.
     CreationTime time.Time

@@ -47,7 +51,6 @@ type DatasetMetadata struct {
     // ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
     // ensure that the metadata hasn't changed since it was read.
     ETag string
-    // TODO(jba): access rules
 }

 // DatasetMetadataToUpdate is used when updating a dataset's metadata.

@@ -55,30 +58,15 @@
 type DatasetMetadataToUpdate struct {
     Description optional.String // The user-friendly description of this table.
     Name        optional.String // The user-friendly name for this dataset.

     // DefaultTableExpiration is the the default expiration time for new tables.
     // If set to time.Duration(0), new tables never expire.
     DefaultTableExpiration optional.Duration

-    setLabels    map[string]string
-    deleteLabels map[string]bool
-}
+    // The entire access list. It is not possible to replace individual entries.
+    Access []*AccessEntry

-// SetLabel causes a label to be added or modified when dm is used
-// in a call to Dataset.Update.
-func (dm *DatasetMetadataToUpdate) SetLabel(name, value string) {
-    if dm.setLabels == nil {
-        dm.setLabels = map[string]string{}
-    }
-    dm.setLabels[name] = value
-}
-
-// DeleteLabel causes a label to be deleted when dm is used in a
-// call to Dataset.Update.
-func (dm *DatasetMetadataToUpdate) DeleteLabel(name string) {
-    if dm.deleteLabels == nil {
-        dm.deleteLabels = map[string]bool{}
-    }
-    dm.deleteLabels[name] = true
+    labelUpdater
 }

 // Dataset creates a handle to a BigQuery dataset in the client's project.

@@ -98,17 +86,100 @@ func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
 // Create creates a dataset in the BigQuery service. An error will be returned if the
 // dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
 func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
-    return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID, md)
+    ds, err := md.toBQ()
+    if err != nil {
+        return err
+    }
+    ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
+    call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
+    setClientHeader(call.Header())
+    _, err = call.Do()
+    return err
+}
+
+func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
+    ds := &bq.Dataset{}
+    if dm == nil {
+        return ds, nil
+    }
+    ds.FriendlyName = dm.Name
+    ds.Description = dm.Description
+    ds.Location = dm.Location
+    ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
+    ds.Labels = dm.Labels
+    var err error
+    ds.Access, err = accessListToBQ(dm.Access)
+    if err != nil {
+        return nil, err
+    }
+    if !dm.CreationTime.IsZero() {
+        return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
+    }
+    if !dm.LastModifiedTime.IsZero() {
+        return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
+    }
+    if dm.FullID != "" {
+        return nil, errors.New("bigquery: Dataset.FullID is not writable")
+    }
+    if dm.ETag != "" {
+        return nil, errors.New("bigquery: Dataset.ETag is not writable")
+    }
+    return ds, nil
+}
+
+func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
+    var q []*bq.DatasetAccess
+    for _, e := range a {
+        a, err := e.toBQ()
+        if err != nil {
+            return nil, err
+        }
+        q = append(q, a)
+    }
+    return q, nil
+}
+
 // Delete deletes the dataset.
 func (d *Dataset) Delete(ctx context.Context) error {
-    return d.c.service.deleteDataset(ctx, d.DatasetID, d.ProjectID)
+    call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx)
+    setClientHeader(call.Header())
+    return call.Do()
 }

 // Metadata fetches the metadata for the dataset.
 func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
-    return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
+    call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
+    setClientHeader(call.Header())
+    var ds *bq.Dataset
+    if err := runWithRetry(ctx, func() (err error) {
+        ds, err = call.Do()
+        return err
+    }); err != nil {
+        return nil, err
+    }
+    return bqToDatasetMetadata(ds)
+}
+
+func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
+    dm := &DatasetMetadata{
+        CreationTime:           unixMillisToTime(d.CreationTime),
+        LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
+        DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
+        Description:            d.Description,
+        Name:                   d.FriendlyName,
+        FullID:                 d.Id,
+        Location:               d.Location,
+        Labels:                 d.Labels,
+        ETag:                   d.Etag,
+    }
+    for _, a := range d.Access {
+        e, err := bqToAccessEntry(a, nil)
+        if err != nil {
+            return nil, err
+        }
+        dm.Access = append(dm.Access, e)
+    }
+    return dm, nil
 }

 // Update modifies specific Dataset metadata fields.

@@ -116,7 +187,63 @@
 // set the etag argument to the DatasetMetadata.ETag field from the read.
 // Pass the empty string for etag for a "blind write" that will always succeed.
 func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
-    return d.c.service.patchDataset(ctx, d.ProjectID, d.DatasetID, &dm, etag)
+    ds, err := dm.toBQ()
+    if err != nil {
+        return nil, err
+    }
+    call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
+    setClientHeader(call.Header())
+    if etag != "" {
+        call.Header().Set("If-Match", etag)
+    }
+    var ds2 *bq.Dataset
+    if err := runWithRetry(ctx, func() (err error) {
+        ds2, err = call.Do()
+        return err
+    }); err != nil {
+        return nil, err
+    }
+    return bqToDatasetMetadata(ds2)
+}
+
+func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
+    ds := &bq.Dataset{}
+    forceSend := func(field string) {
+        ds.ForceSendFields = append(ds.ForceSendFields, field)
+    }
+
+    if dm.Description != nil {
+        ds.Description = optional.ToString(dm.Description)
+        forceSend("Description")
+    }
+    if dm.Name != nil {
+        ds.FriendlyName = optional.ToString(dm.Name)
+        forceSend("FriendlyName")
+    }
+    if dm.DefaultTableExpiration != nil {
+        dur := optional.ToDuration(dm.DefaultTableExpiration)
+        if dur == 0 {
+            // Send a null to delete the field.
+            ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
+        } else {
+            ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
+        }
+    }
+    if dm.Access != nil {
+        var err error
+        ds.Access, err = accessListToBQ(dm.Access)
+        if err != nil {
+            return nil, err
+        }
+        if len(ds.Access) == 0 {
+            ds.NullFields = append(ds.NullFields, "Access")
+        }
+    }
+    labels, forces, nulls := dm.update()
+    ds.Labels = labels
+    ds.ForceSendFields = append(ds.ForceSendFields, forces...)
+    ds.NullFields = append(ds.NullFields, nulls...)
+    return ds, nil
 }

 // Table creates a handle to a BigQuery table in the dataset.

@@ -163,16 +290,41 @@
 // PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
 func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

+// for testing
+var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
+    call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
+        PageToken(pageToken).
+        Context(it.ctx)
+    setClientHeader(call.Header())
+    if pageSize > 0 {
+        call.MaxResults(int64(pageSize))
+    }
+    var res *bq.TableList
+    err := runWithRetry(it.ctx, func() (err error) {
+        res, err = call.Do()
+        return err
+    })
+    return res, err
+}
+
 func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
-    tables, tok, err := it.dataset.c.service.listTables(it.ctx, it.dataset.ProjectID, it.dataset.DatasetID, pageSize, pageToken)
+    res, err := listTables(it, pageSize, pageToken)
     if err != nil {
         return "", err
     }
-    for _, t := range tables {
-        t.c = it.dataset.c
-        it.tables = append(it.tables, t)
+    for _, t := range res.Tables {
+        it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
     }
+    return res.NextPageToken, nil
+}
+
+func bqToTable(tr *bq.TableReference, c *Client) *Table {
+    return &Table{
+        ProjectID: tr.ProjectId,
+        DatasetID: tr.DatasetId,
+        TableID:   tr.TableId,
+        c:         c,
+    }
-    return tok, nil
 }

 // Datasets returns an iterator over the datasets in a project.

@@ -232,15 +384,118 @@ func (it *DatasetIterator) Next() (*Dataset, error) {
     return item, nil
 }

+// for testing
+var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
+    call := it.c.bqs.Datasets.List(it.ProjectID).
+        Context(it.ctx).
+        PageToken(pageToken).
+        All(it.ListHidden)
+    setClientHeader(call.Header())
+    if pageSize > 0 {
+        call.MaxResults(int64(pageSize))
+    }
+    if it.Filter != "" {
+        call.Filter(it.Filter)
+    }
+    var res *bq.DatasetList
+    err := runWithRetry(it.ctx, func() (err error) {
+        res, err = call.Do()
+        return err
+    })
+    return res, err
+}
+
 func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
-    datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.ProjectID,
-        pageSize, pageToken, it.ListHidden, it.Filter)
+    res, err := listDatasets(it, pageSize, pageToken)
     if err != nil {
         return "", err
     }
-    for _, d := range datasets {
-        d.c = it.c
-        it.items = append(it.items, d)
+    for _, d := range res.Datasets {
+        it.items = append(it.items, &Dataset{
+            ProjectID: d.DatasetReference.ProjectId,
+            DatasetID: d.DatasetReference.DatasetId,
+            c:         it.c,
+        })
     }
-    return nextPageToken, nil
+    return res.NextPageToken, nil
 }

+// An AccessEntry describes the permissions that an entity has on a dataset.
+type AccessEntry struct {
+    Role       AccessRole // The role of the entity
+    EntityType EntityType // The type of entity
+    Entity     string     // The entity (individual or group) granted access
+    View       *Table     // The view granted access (EntityType must be ViewEntity)
+}
+
+// AccessRole is the level of access to grant to a dataset.
+type AccessRole string
+
+const (
+    OwnerRole  AccessRole = "OWNER"
+    ReaderRole AccessRole = "READER"
+    WriterRole AccessRole = "WRITER"
+)
+
+// EntityType is the type of entity in an AccessEntry.
+type EntityType int
+
+const (
+    // A domain (e.g. "example.com")
+    DomainEntity EntityType = iota + 1
+
+    // Email address of a Google Group
+    GroupEmailEntity
+
+    // Email address of an individual user.
+    UserEmailEntity
+
+    // A special group: one of projectOwners, projectReaders, projectWriters or allAuthenticatedUsers.
+    SpecialGroupEntity
+
+    // A BigQuery view.
+    ViewEntity
+)
+
+func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
+    q := &bq.DatasetAccess{Role: string(e.Role)}
+    switch e.EntityType {
+    case DomainEntity:
+        q.Domain = e.Entity
+    case GroupEmailEntity:
+        q.GroupByEmail = e.Entity
+    case UserEmailEntity:
+        q.UserByEmail = e.Entity
+    case SpecialGroupEntity:
+        q.SpecialGroup = e.Entity
+    case ViewEntity:
+        q.View = e.View.toBQ()
+    default:
+        return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
+    }
+    return q, nil
+}
+
+func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
+    e := &AccessEntry{Role: AccessRole(q.Role)}
+    switch {
+    case q.Domain != "":
+        e.Entity = q.Domain
+        e.EntityType = DomainEntity
+    case q.GroupByEmail != "":
+        e.Entity = q.GroupByEmail
+        e.EntityType = GroupEmailEntity
+    case q.UserByEmail != "":
+        e.Entity = q.UserByEmail
+        e.EntityType = UserEmailEntity
+    case q.SpecialGroup != "":
+        e.Entity = q.SpecialGroup
+        e.EntityType = SpecialGroupEntity
+    case q.View != nil:
+        e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
+        e.EntityType = ViewEntity
+    default:
+        return nil, errors.New("bigquery: invalid access value")
+    }
+    return e, nil
+}
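The access-list support added above threads AccessEntry values through DatasetMetadata.toBQ on create. Here is a hedged sketch of how a caller might use it; the entry values mirror the TestDatasetToBQ case in the next file, and "project-id" and "my_dataset" are placeholders.

    package main

    import (
        "log"
        "time"

        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id") // placeholder project ID
        if err != nil {
            log.Fatal(err)
        }
        // toBQ turns this entry into bq.DatasetAccess{Role: "OWNER", Domain: "example.com"}.
        md := &bigquery.DatasetMetadata{
            Name:                   "my dataset", // friendly name
            DefaultTableExpiration: time.Hour,
            Access: []*bigquery.AccessEntry{
                {Role: bigquery.OwnerRole, EntityType: bigquery.DomainEntity, Entity: "example.com"},
            },
        }
        if err := client.Dataset("my_dataset").Create(ctx, md); err != nil {
            log.Fatal(err)
        }
    }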
vendor/cloud.google.com/go/bigquery/dataset_test.go | 278 changes (generated, vendored)

@@ -18,24 +18,29 @@ import (
     "errors"
     "strconv"
     "testing"
+    "time"

+    "github.com/google/go-cmp/cmp"
+
     "cloud.google.com/go/internal/testutil"

     "golang.org/x/net/context"
+    bq "google.golang.org/api/bigquery/v2"
     itest "google.golang.org/api/iterator/testing"
 )

 // readServiceStub services read requests by returning data from an in-memory list of values.
-type listTablesServiceStub struct {
+type listTablesStub struct {
     expectedProject, expectedDataset string
-    tables                           []*Table
-    service
+    tables                           []*bq.TableListTables
 }

-func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
-    if projectID != s.expectedProject {
-        return nil, "", errors.New("wrong project id")
+func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
+    if it.dataset.ProjectID != s.expectedProject {
+        return nil, errors.New("wrong project id")
     }
-    if datasetID != s.expectedDataset {
-        return nil, "", errors.New("wrong dataset id")
+    if it.dataset.DatasetID != s.expectedDataset {
+        return nil, errors.New("wrong dataset id")
     }
     const maxPageSize = 2
     if pageSize <= 0 || pageSize > maxPageSize {

@@ -46,7 +51,7 @@
         var err error
         start, err = strconv.Atoi(pageToken)
         if err != nil {
-            return nil, "", err
+            return nil, err
         }
     }
     end := start + pageSize

@@ -57,100 +62,267 @@
     if end < len(s.tables) {
         nextPageToken = strconv.Itoa(end)
     }
-    return s.tables[start:end], nextPageToken, nil
+    return &bq.TableList{
+        Tables:        s.tables[start:end],
+        NextPageToken: nextPageToken,
+    }, nil
 }

 func TestTables(t *testing.T) {
-    t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
-    t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
-    t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
-    allTables := []*Table{t1, t2, t3}
-    c := &Client{
-        service: &listTablesServiceStub{
-            expectedProject: "x",
-            expectedDataset: "y",
-            tables:          allTables,
-        },
-        projectID: "x",
+    c := &Client{projectID: "p1"}
+    inTables := []*bq.TableListTables{
+        {TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
+        {TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
+        {TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
     }
-    msg, ok := itest.TestIterator(allTables,
-        func() interface{} { return c.Dataset("y").Tables(context.Background()) },
+    outTables := []*Table{
+        {ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
+        {ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
+        {ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
+    }
+
+    lts := &listTablesStub{
+        expectedProject: "p1",
+        expectedDataset: "d1",
+        tables:          inTables,
+    }
+    old := listTables
+    listTables = lts.listTables // cannot use t.Parallel with this test
+    defer func() { listTables = old }()
+
+    msg, ok := itest.TestIterator(outTables,
+        func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
         func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
     if !ok {
         t.Error(msg)
     }
 }

-type listDatasetsFake struct {
-    service
-
-    projectID string
-    datasets  []*Dataset
-    hidden    map[*Dataset]bool
+type listDatasetsStub struct {
+    expectedProject string
+    datasets        []*bq.DatasetListDatasets
+    hidden          map[*bq.DatasetListDatasets]bool
 }

-func (df *listDatasetsFake) listDatasets(_ context.Context, projectID string, pageSize int, pageToken string, listHidden bool, filter string) ([]*Dataset, string, error) {
+func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
     const maxPageSize = 2
     if pageSize <= 0 || pageSize > maxPageSize {
         pageSize = maxPageSize
     }
-    if filter != "" {
-        return nil, "", errors.New("filter not supported")
+    if it.Filter != "" {
+        return nil, errors.New("filter not supported")
     }
-    if projectID != df.projectID {
-        return nil, "", errors.New("bad project ID")
+    if it.ProjectID != s.expectedProject {
+        return nil, errors.New("bad project ID")
     }
     start := 0
     if pageToken != "" {
         var err error
         start, err = strconv.Atoi(pageToken)
         if err != nil {
-            return nil, "", err
+            return nil, err
         }
     }
     var (
         i             int
-        result        []*Dataset
+        result        []*bq.DatasetListDatasets
         nextPageToken string
     )
-    for i = start; len(result) < pageSize && i < len(df.datasets); i++ {
-        if df.hidden[df.datasets[i]] && !listHidden {
+    for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
+        if s.hidden[s.datasets[i]] && !it.ListHidden {
             continue
         }
-        result = append(result, df.datasets[i])
+        result = append(result, s.datasets[i])
     }
-    if i < len(df.datasets) {
+    if i < len(s.datasets) {
         nextPageToken = strconv.Itoa(i)
     }
-    return result, nextPageToken, nil
+    return &bq.DatasetList{
+        Datasets:      result,
+        NextPageToken: nextPageToken,
+    }, nil
 }

 func TestDatasets(t *testing.T) {
-    service := &listDatasetsFake{projectID: "p"}
-    client := &Client{service: service}
-    datasets := []*Dataset{
+    client := &Client{projectID: "p"}
+    inDatasets := []*bq.DatasetListDatasets{
+        {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
+        {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
+        {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
+        {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
+    }
+    outDatasets := []*Dataset{
         {"p", "a", client},
         {"p", "b", client},
         {"p", "hidden", client},
         {"p", "c", client},
     }
-    service.datasets = datasets
-    service.hidden = map[*Dataset]bool{datasets[2]: true}
-    c := &Client{
-        projectID: "p",
-        service:   service,
+    lds := &listDatasetsStub{
+        expectedProject: "p",
+        datasets:        inDatasets,
+        hidden:          map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
     }
-    msg, ok := itest.TestIterator(datasets,
-        func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = true; return it },
+    old := listDatasets
+    listDatasets = lds.listDatasets // cannot use t.Parallel with this test
+    defer func() { listDatasets = old }()
+
+    msg, ok := itest.TestIterator(outDatasets,
+        func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
         func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
     if !ok {
         t.Fatalf("ListHidden=true: %s", msg)
     }

-    msg, ok = itest.TestIterator([]*Dataset{datasets[0], datasets[1], datasets[3]},
-        func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = false; return it },
+    msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
+        func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
         func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
     if !ok {
         t.Fatalf("ListHidden=false: %s", msg)
     }
 }

+func TestDatasetToBQ(t *testing.T) {
+    for _, test := range []struct {
+        in   *DatasetMetadata
+        want *bq.Dataset
+    }{
+        {nil, &bq.Dataset{}},
+        {&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
+        {&DatasetMetadata{
+            Name:                   "name",
+            Description:            "desc",
+            DefaultTableExpiration: time.Hour,
+            Location:               "EU",
+            Labels:                 map[string]string{"x": "y"},
+            Access:                 []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
+        }, &bq.Dataset{
+            FriendlyName:             "name",
+            Description:              "desc",
+            DefaultTableExpirationMs: 60 * 60 * 1000,
+            Location:                 "EU",
+            Labels:                   map[string]string{"x": "y"},
+            Access:                   []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
+        }},
+    } {
+        got, err := test.in.toBQ()
+        if err != nil {
+            t.Fatal(err)
+        }
+        if !testutil.Equal(got, test.want) {
+            t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
+        }
+    }
+
+    // Check that non-writeable fields are unset.
+    aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
+    for _, dm := range []*DatasetMetadata{
+        {CreationTime: aTime},
+        {LastModifiedTime: aTime},
+        {FullID: "x"},
+        {ETag: "e"},
+    } {
+        if _, err := dm.toBQ(); err == nil {
+            t.Errorf("%+v: got nil, want error", dm)
+        }
+    }
+}
+
+func TestBQToDatasetMetadata(t *testing.T) {
+    cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
+    cMillis := cTime.UnixNano() / 1e6
+    mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
+    mMillis := mTime.UnixNano() / 1e6
+    q := &bq.Dataset{
+        CreationTime:             cMillis,
+        LastModifiedTime:         mMillis,
+        FriendlyName:             "name",
+        Description:              "desc",
+        DefaultTableExpirationMs: 60 * 60 * 1000,
+        Location:                 "EU",
+        Labels:                   map[string]string{"x": "y"},
+        Access: []*bq.DatasetAccess{
+            {Role: "READER", UserByEmail: "joe@example.com"},
+            {Role: "WRITER", GroupByEmail: "users@example.com"},
+        },
+        Etag: "etag",
+    }
+    want := &DatasetMetadata{
+        CreationTime:           cTime,
+        LastModifiedTime:       mTime,
+        Name:                   "name",
+        Description:            "desc",
+        DefaultTableExpiration: time.Hour,
+        Location:               "EU",
+        Labels:                 map[string]string{"x": "y"},
+        Access: []*AccessEntry{
+            {Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
+            {Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
+        },
+        ETag: "etag",
+    }
+    got, err := bqToDatasetMetadata(q)
+    if err != nil {
+        t.Fatal(err)
+    }
+    if diff := testutil.Diff(got, want); diff != "" {
+        t.Errorf("-got, +want:\n%s", diff)
+    }
+}
+
+func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
+    dm := DatasetMetadataToUpdate{
+        Description:            "desc",
+        Name:                   "name",
+        DefaultTableExpiration: time.Hour,
+    }
+    dm.SetLabel("label", "value")
+    dm.DeleteLabel("del")
+
+    got, err := dm.toBQ()
+    if err != nil {
+        t.Fatal(err)
+    }
+    want := &bq.Dataset{
+        Description:              "desc",
+        FriendlyName:             "name",
+        DefaultTableExpirationMs: 60 * 60 * 1000,
+        Labels:                   map[string]string{"label": "value"},
+        ForceSendFields:          []string{"Description", "FriendlyName"},
+        NullFields:               []string{"Labels.del"},
+    }
+    if diff := testutil.Diff(got, want); diff != "" {
+        t.Errorf("-got, +want:\n%s", diff)
+    }
+}
+
+func TestConvertAccessEntry(t *testing.T) {
+    c := &Client{projectID: "pid"}
+    for _, e := range []*AccessEntry{
+        {Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
+        {Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
+        {Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
+        {Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
+        {Role: ReaderRole, EntityType: ViewEntity,
+            View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
+    } {
+        q, err := e.toBQ()
+        if err != nil {
+            t.Fatal(err)
+        }
+        got, err := bqToAccessEntry(q, c)
+        if err != nil {
+            t.Fatal(err)
+        }
+        if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
+            t.Errorf("got=-, want=+:\n%s", diff)
+        }
+    }
+
+    e := &AccessEntry{Role: ReaderRole, Entity: "e"}
+    if _, err := e.toBQ(); err == nil {
+        t.Error("got nil, want error")
+    }
+    if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
+        t.Error("got nil, want error")
+    }
+}
vendor/cloud.google.com/go/bigquery/error.go | 2 changes (generated, vendored)

@@ -30,7 +30,7 @@ func (e Error) Error() string {
     return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
 }

-func errorFromErrorProto(ep *bq.ErrorProto) *Error {
+func bqToError(ep *bq.ErrorProto) *Error {
     if ep == nil {
         return nil
     }
vendor/cloud.google.com/go/bigquery/error_test.go | 2 changes (generated, vendored)

@@ -95,7 +95,7 @@ func TestErrorFromErrorProto(t *testing.T) {
             want: &Error{Location: "L", Message: "M", Reason: "R"},
         },
     } {
-        if got := errorFromErrorProto(test.in); !testutil.Equal(got, test.want) {
+        if got := bqToError(test.in); !testutil.Equal(got, test.want) {
             t.Errorf("%v: got %v, want %v", test.in, got, test.want)
         }
     }
vendor/cloud.google.com/go/bigquery/examples_test.go | 32 changes (generated, vendored)

@@ -86,7 +86,18 @@ func ExampleClient_JobFromID() {
     if err != nil {
         // TODO: Handle error.
     }
-    fmt.Println(job)
+    fmt.Println(job.LastStatus()) // Display the job's status.
+}
+
+func ExampleClient_Jobs() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    it := client.Jobs(ctx)
+    it.State = bigquery.Running // list only running jobs.
+    _ = it // TODO: iterate using Next or iterator.Pager.
 }

 func ExampleNewGCSReference() {

@@ -228,6 +239,25 @@ func ExampleJob_Wait() {
     }
 }

+func ExampleJob_Config() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    ds := client.Dataset("my_dataset")
+    job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    jc, err := job.Config()
+    if err != nil {
+        // TODO: Handle error.
+    }
+    copyConfig := jc.(*bigquery.CopyConfig)
+    fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition)
+}
+
 func ExampleDataset_Create() {
     ctx := context.Background()
     client, err := bigquery.NewClient(ctx, "project-id")
398
vendor/cloud.google.com/go/bigquery/external.go
generated
vendored
Normal file
398
vendor/cloud.google.com/go/bigquery/external.go
generated
vendored
Normal file
|
@@ -0,0 +1,398 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"encoding/base64"
+	"unicode/utf8"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// DataFormat describes the format of BigQuery table data.
+type DataFormat string
+
+// Constants describing the format of BigQuery table data.
+const (
+	CSV             DataFormat = "CSV"
+	Avro            DataFormat = "AVRO"
+	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
+	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
+	GoogleSheets    DataFormat = "GOOGLE_SHEETS"
+	Bigtable        DataFormat = "BIGTABLE"
+)
+
+// ExternalData is a table which is stored outside of BigQuery. It is implemented by
+// *ExternalDataConfig.
+// GCSReference also implements it, for backwards compatibility.
+type ExternalData interface {
+	toBQ() bq.ExternalDataConfiguration
+}
+
+// ExternalDataConfig describes data external to BigQuery that can be used
+// in queries and to create external tables.
+type ExternalDataConfig struct {
+	// The format of the data. Required.
+	SourceFormat DataFormat
+
+	// The fully-qualified URIs that point to your
+	// data in Google Cloud. Required.
+	//
+	// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
+	// and it must come after the 'bucket' name. Size limits related to load jobs
+	// apply to external data sources.
+	//
+	// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to be
+	// a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
+	//
+	// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
+	// the '*' wildcard character is not allowed.
+	SourceURIs []string
+
+	// The schema of the data. Required for CSV and JSON; disallowed for the
+	// other formats.
+	Schema Schema
+
+	// Try to detect schema and format options automatically.
+	// Any option specified explicitly will be honored.
+	AutoDetect bool
+
+	// The compression type of the data.
+	Compression Compression
+
+	// IgnoreUnknownValues causes values not matching the schema to be
+	// tolerated. Unknown values are ignored. For CSV this ignores extra values
+	// at the end of a line. For JSON this ignores named values that do not
+	// match any column name. If this field is not set, records containing
+	// unknown values are treated as bad records. The MaxBadRecords field can
+	// be used to customize how bad records are handled.
+	IgnoreUnknownValues bool
+
+	// MaxBadRecords is the maximum number of bad records that will be ignored
+	// when reading data.
+	MaxBadRecords int64
+
+	// Additional options for CSV, GoogleSheets and Bigtable formats.
+	Options ExternalDataConfigOptions
+}
+
+func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
+	q := bq.ExternalDataConfiguration{
+		SourceFormat:        string(e.SourceFormat),
+		SourceUris:          e.SourceURIs,
+		Autodetect:          e.AutoDetect,
+		Compression:         string(e.Compression),
+		IgnoreUnknownValues: e.IgnoreUnknownValues,
+		MaxBadRecords:       e.MaxBadRecords,
+	}
+	if e.Schema != nil {
+		q.Schema = e.Schema.toBQ()
+	}
+	if e.Options != nil {
+		e.Options.populateExternalDataConfig(&q)
+	}
+	return q
+}
+
+func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
+	e := &ExternalDataConfig{
+		SourceFormat:        DataFormat(q.SourceFormat),
+		SourceURIs:          q.SourceUris,
+		AutoDetect:          q.Autodetect,
+		Compression:         Compression(q.Compression),
+		IgnoreUnknownValues: q.IgnoreUnknownValues,
+		MaxBadRecords:       q.MaxBadRecords,
+		Schema:              bqToSchema(q.Schema),
+	}
+	switch {
+	case q.CsvOptions != nil:
+		e.Options = bqToCSVOptions(q.CsvOptions)
+	case q.GoogleSheetsOptions != nil:
+		e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
+	case q.BigtableOptions != nil:
+		var err error
+		e.Options, err = bqToBigtableOptions(q.BigtableOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return e, nil
+}
+
+// ExternalDataConfigOptions are additional options for external data configurations.
+// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
+type ExternalDataConfigOptions interface {
+	populateExternalDataConfig(*bq.ExternalDataConfiguration)
+}
+
+// CSVOptions are additional options for CSV external data sources.
+type CSVOptions struct {
+	// AllowJaggedRows causes missing trailing optional columns to be tolerated
+	// when reading CSV data. Missing values are treated as nulls.
+	AllowJaggedRows bool
+
+	// AllowQuotedNewlines sets whether quoted data sections containing
+	// newlines are allowed when reading CSV data.
+	AllowQuotedNewlines bool
+
+	// Encoding is the character encoding of data to be read.
+	Encoding Encoding
+
+	// FieldDelimiter is the separator for fields in a CSV file, used when
+	// reading or exporting data. The default is ",".
+	FieldDelimiter string
+
+	// Quote is the value used to quote data sections in a CSV file. The
+	// default quotation character is the double quote ("), which is used if
+	// both Quote and ForceZeroQuote are unset.
+	// To specify that no character should be interpreted as a quotation
+	// character, set ForceZeroQuote to true.
+	// Only used when reading data.
+	Quote          string
+	ForceZeroQuote bool
+
+	// The number of rows at the top of a CSV file that BigQuery will skip when
+	// reading data.
+	SkipLeadingRows int64
+}
+
+func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+	c.CsvOptions = &bq.CsvOptions{
+		AllowJaggedRows:     o.AllowJaggedRows,
+		AllowQuotedNewlines: o.AllowQuotedNewlines,
+		Encoding:            string(o.Encoding),
+		FieldDelimiter:      o.FieldDelimiter,
+		Quote:               o.quote(),
+		SkipLeadingRows:     o.SkipLeadingRows,
+	}
+}
+
+// quote returns the CSV quote character, or nil if unset.
+func (o *CSVOptions) quote() *string {
+	if o.ForceZeroQuote {
+		quote := ""
+		return &quote
+	}
+	if o.Quote == "" {
+		return nil
+	}
+	return &o.Quote
+}
+
+func (o *CSVOptions) setQuote(ps *string) {
+	if ps != nil {
+		o.Quote = *ps
+		if o.Quote == "" {
+			o.ForceZeroQuote = true
+		}
+	}
+}
+
+func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
+	o := &CSVOptions{
+		AllowJaggedRows:     q.AllowJaggedRows,
+		AllowQuotedNewlines: q.AllowQuotedNewlines,
+		Encoding:            Encoding(q.Encoding),
+		FieldDelimiter:      q.FieldDelimiter,
+		SkipLeadingRows:     q.SkipLeadingRows,
+	}
+	o.setQuote(q.Quote)
+	return o
+}
+
+// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
+type GoogleSheetsOptions struct {
+	// The number of rows at the top of a sheet that BigQuery will skip when
+	// reading data.
+	SkipLeadingRows int64
+}
+
+func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+	c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
+		SkipLeadingRows: o.SkipLeadingRows,
+	}
+}
+
+func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
+	return &GoogleSheetsOptions{
+		SkipLeadingRows: q.SkipLeadingRows,
+	}
+}
+
+// BigtableOptions are additional options for Bigtable external data sources.
+type BigtableOptions struct {
+	// A list of column families to expose in the table schema along with their
+	// types. If omitted, all column families are present in the table schema and
+	// their values are read as BYTES.
+	ColumnFamilies []*BigtableColumnFamily
+
+	// If true, then the column families that are not specified in columnFamilies
+	// list are not exposed in the table schema. Otherwise, they are read with BYTES
+	// type values. The default is false.
+	IgnoreUnspecifiedColumnFamilies bool
+
+	// If true, then the rowkey column families will be read and converted to string.
+	// Otherwise they are read with BYTES type values and users need to manually cast
+	// them with CAST if necessary. The default is false.
+	ReadRowkeyAsString bool
+}
+
+func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+	q := &bq.BigtableOptions{
+		IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
+		ReadRowkeyAsString:              o.ReadRowkeyAsString,
+	}
+	for _, f := range o.ColumnFamilies {
+		q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
+	}
+	c.BigtableOptions = q
+}
+
+func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
+	b := &BigtableOptions{
+		IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
+		ReadRowkeyAsString:              q.ReadRowkeyAsString,
+	}
+	for _, f := range q.ColumnFamilies {
+		f2, err := bqToBigtableColumnFamily(f)
+		if err != nil {
+			return nil, err
+		}
+		b.ColumnFamilies = append(b.ColumnFamilies, f2)
+	}
+	return b, nil
+}
+
+// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
+type BigtableColumnFamily struct {
+	// Identifier of the column family.
+	FamilyID string
+
+	// Lists of columns that should be exposed as individual fields as opposed to a
+	// list of (column name, value) pairs. All columns whose qualifier matches a
+	// qualifier in this list can be accessed as .. Other columns can be accessed as
+	// a list through .Column field.
+	Columns []*BigtableColumn
+
+	// The encoding of the values when the type is not STRING. Acceptable encoding values are:
+	// - TEXT - indicates values are alphanumeric text strings.
+	// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
+	// This can be overridden for a specific column by listing that column in 'columns' and
+	// specifying an encoding for it.
+	Encoding string
+
+	// If true, only the latest version of values are exposed for all columns in this
+	// column family. This can be overridden for a specific column by listing that
+	// column in 'columns' and specifying a different setting for that column.
+	OnlyReadLatest bool
+
+	// The type to convert the value in cells of this
+	// column family. The values are expected to be encoded using HBase
+	// Bytes.toBytes function when using the BINARY encoding value.
+	// Following BigQuery types are allowed (case-sensitive):
+	// BYTES STRING INTEGER FLOAT BOOLEAN.
+	// The default type is BYTES. This can be overridden for a specific column by
+	// listing that column in 'columns' and specifying a type for it.
+	Type string
+}
+
+func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
+	q := &bq.BigtableColumnFamily{
+		FamilyId:       b.FamilyID,
+		Encoding:       b.Encoding,
+		OnlyReadLatest: b.OnlyReadLatest,
+		Type:           b.Type,
+	}
+	for _, col := range b.Columns {
+		q.Columns = append(q.Columns, col.toBQ())
+	}
+	return q
+}
+
+func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
+	b := &BigtableColumnFamily{
+		FamilyID:       q.FamilyId,
+		Encoding:       q.Encoding,
+		OnlyReadLatest: q.OnlyReadLatest,
+		Type:           q.Type,
+	}
+	for _, col := range q.Columns {
+		c, err := bqToBigtableColumn(col)
+		if err != nil {
+			return nil, err
+		}
+		b.Columns = append(b.Columns, c)
+	}
+	return b, nil
+}
+
+// BigtableColumn describes how BigQuery should access a Bigtable column.
+type BigtableColumn struct {
+	// Qualifier of the column. Columns in the parent column family that have this
+	// exact qualifier are exposed as . field. The column field name is the
+	// same as the column qualifier.
+	Qualifier string
+
+	// If the qualifier is not a valid BigQuery field identifier i.e. does not match
+	// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
+	// name and is used as field name in queries.
+	FieldName string
+
+	// If true, only the latest version of values are exposed for this column.
+	// See BigtableColumnFamily.OnlyReadLatest.
+	OnlyReadLatest bool
+
+	// The encoding of the values when the type is not STRING.
+	// See BigtableColumnFamily.Encoding
+	Encoding string
+
+	// The type to convert the value in cells of this column.
+	// See BigtableColumnFamily.Type
+	Type string
+}
+
+func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
+	q := &bq.BigtableColumn{
+		FieldName:      b.FieldName,
+		OnlyReadLatest: b.OnlyReadLatest,
+		Encoding:       b.Encoding,
+		Type:           b.Type,
+	}
+	if utf8.ValidString(b.Qualifier) {
+		q.QualifierString = b.Qualifier
+	} else {
+		q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
+	}
+	return q
+}
+
+func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
+	b := &BigtableColumn{
+		FieldName:      q.FieldName,
+		OnlyReadLatest: q.OnlyReadLatest,
+		Encoding:       q.Encoding,
+		Type:           q.Type,
+	}
+	if q.QualifierString != "" {
+		b.Qualifier = q.QualifierString
+	} else {
+		bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
+		if err != nil {
+			return nil, err
+		}
+		b.Qualifier = string(bytes)
+	}
+	return b, nil
+}
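Since external.go is entirely new in this update, a minimal usage sketch may help reviewers. This is not part of the vendored diff; the project ID and bucket are hypothetical, and it shows the ExternalDataConfig type above being used to query a GCS-hosted CSV file in place via Query.TableDefinitions, as the integration tests later in this diff also exercise.

package main

import (
	"fmt"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id") // hypothetical project
	if err != nil {
		// TODO: Handle error.
	}
	// Describe the external CSV data; SourceFormat and SourceURIs are required.
	edc := &bigquery.ExternalDataConfig{
		SourceFormat: bigquery.CSV,
		SourceURIs:   []string{"gs://my-bucket/data-*.csv"}, // hypothetical bucket
		Schema: bigquery.Schema{
			{Name: "name", Type: bigquery.StringFieldType},
			{Name: "num", Type: bigquery.IntegerFieldType},
		},
		Options: &bigquery.CSVOptions{SkipLeadingRows: 1},
	}
	// Query the file in place by binding it to the table name "csv".
	q := client.Query("SELECT name, num FROM csv")
	q.TableDefinitions = map[string]bigquery.ExternalData{"csv": edc}
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(it.TotalRows) // TODO: iterate with it.Next.
}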
143 vendor/cloud.google.com/go/bigquery/external_test.go generated vendored Normal file
@@ -0,0 +1,143 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"testing"
+
+	"cloud.google.com/go/internal/pretty"
+	"cloud.google.com/go/internal/testutil"
+)
+
+func TestExternalDataConfig(t *testing.T) {
+	// Round-trip of ExternalDataConfig to underlying representation.
+	for i, want := range []*ExternalDataConfig{
+		{
+			SourceFormat:        CSV,
+			SourceURIs:          []string{"uri"},
+			Schema:              Schema{{Name: "n", Type: IntegerFieldType}},
+			AutoDetect:          true,
+			Compression:         Gzip,
+			IgnoreUnknownValues: true,
+			MaxBadRecords:       17,
+			Options: &CSVOptions{
+				AllowJaggedRows:     true,
+				AllowQuotedNewlines: true,
+				Encoding:            UTF_8,
+				FieldDelimiter:      "f",
+				Quote:               "q",
+				SkipLeadingRows:     3,
+			},
+		},
+		{
+			SourceFormat: GoogleSheets,
+			Options:      &GoogleSheetsOptions{SkipLeadingRows: 4},
+		},
+		{
+			SourceFormat: Bigtable,
+			Options: &BigtableOptions{
+				IgnoreUnspecifiedColumnFamilies: true,
+				ReadRowkeyAsString:              true,
+				ColumnFamilies: []*BigtableColumnFamily{
+					{
+						FamilyID:       "f1",
+						Encoding:       "TEXT",
+						OnlyReadLatest: true,
+						Type:           "FLOAT",
+						Columns: []*BigtableColumn{
+							{
+								Qualifier:      "valid-utf-8",
+								FieldName:      "fn",
+								OnlyReadLatest: true,
+								Encoding:       "BINARY",
+								Type:           "STRING",
+							},
+						},
+					},
+				},
+			},
+		},
+	} {
+		q := want.toBQ()
+		got, err := bqToExternalDataConfig(&q)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if diff := testutil.Diff(got, want); diff != "" {
+			t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
+		}
+	}
+}
+
+func TestQuote(t *testing.T) {
+	ptr := func(s string) *string { return &s }
+
+	for _, test := range []struct {
+		quote string
+		force bool
+		want  *string
+	}{
+		{"", false, nil},
+		{"", true, ptr("")},
+		{"-", false, ptr("-")},
+		{"-", true, ptr("")},
+	} {
+		o := CSVOptions{
+			Quote:          test.quote,
+			ForceZeroQuote: test.force,
+		}
+		got := o.quote()
+		if (got == nil) != (test.want == nil) {
+			t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
+		}
+		if got != nil && test.want != nil && *got != *test.want {
+			t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
+		}
+	}
+}
+
+func TestQualifier(t *testing.T) {
+	b := BigtableColumn{Qualifier: "a"}
+	q := b.toBQ()
+	if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
+		t.Errorf("got (%q, %q), want (%q, %q)",
+			q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
+	}
+	b2, err := bqToBigtableColumn(q)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b2.Qualifier, b.Qualifier; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+
+	const (
+		invalidUTF8    = "\xDF\xFF"
+		invalidEncoded = "3/8"
+	)
+	b = BigtableColumn{Qualifier: invalidUTF8}
+	q = b.toBQ()
+	if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
+		t.Errorf("got (%q, %q), want (%q, %q)",
+			q.QualifierString, "", b.Qualifier, invalidEncoded)
+	}
+	b2, err = bqToBigtableColumn(q)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := b2.Qualifier, b.Qualifier; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+}
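For reviewers, a standalone sketch of the qualifier rule TestQualifier exercises above: valid UTF-8 qualifiers travel as plain strings, while anything else is base64-encoded with RawStdEncoding (no padding), which is why the invalid byte pair 0xDF 0xFF encodes to "3/8". This mirrors the vendored logic and is not part of the diff.

package main

import (
	"encoding/base64"
	"fmt"
	"unicode/utf8"
)

// encodeQualifier mirrors BigtableColumn.toBQ's split between
// QualifierString and QualifierEncoded.
func encodeQualifier(q string) (qualifierString, qualifierEncoded string) {
	if utf8.ValidString(q) {
		return q, ""
	}
	return "", base64.RawStdEncoding.EncodeToString([]byte(q))
}

func main() {
	fmt.Println(encodeQualifier("valid-utf-8")) // -> "valid-utf-8", ""
	fmt.Println(encodeQualifier("\xDF\xFF"))    // -> "", "3/8"
}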
75 vendor/cloud.google.com/go/bigquery/extract.go generated vendored
@@ -21,12 +21,6 @@ import (
 
 // ExtractConfig holds the configuration for an extract job.
 type ExtractConfig struct {
-	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
-	JobID string
-
-	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
-	AddJobIDSuffix bool
-
 	// Src is the table from which data will be extracted.
 	Src *Table
 
@@ -35,10 +29,52 @@ type ExtractConfig struct {
 
 	// DisableHeader disables the printing of a header row in exported data.
 	DisableHeader bool
+
+	// The labels associated with this job.
+	Labels map[string]string
 }
 
+func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
+	var printHeader *bool
+	if e.DisableHeader {
+		f := false
+		printHeader = &f
+	}
+	return &bq.JobConfiguration{
+		Labels: e.Labels,
+		Extract: &bq.JobConfigurationExtract{
+			DestinationUris:   append([]string{}, e.Dst.URIs...),
+			Compression:       string(e.Dst.Compression),
+			DestinationFormat: string(e.Dst.DestinationFormat),
+			FieldDelimiter:    e.Dst.FieldDelimiter,
+			SourceTable:       e.Src.toBQ(),
+			PrintHeader:       printHeader,
+		},
+	}
+}
+
+func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
+	qe := q.Extract
+	return &ExtractConfig{
+		Labels: q.Labels,
+		Dst: &GCSReference{
+			URIs:              qe.DestinationUris,
+			Compression:       Compression(qe.Compression),
+			DestinationFormat: DataFormat(qe.DestinationFormat),
+			FileConfig: FileConfig{
+				CSVOptions: CSVOptions{
+					FieldDelimiter: qe.FieldDelimiter,
+				},
+			},
+		},
+		DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
+		Src:           bqToTable(qe.SourceTable, c),
+	}
+}
+
 // An Extractor extracts data from a BigQuery table into Google Cloud Storage.
 type Extractor struct {
+	JobIDConfig
 	ExtractConfig
 	c *Client
 }
@@ -58,23 +94,12 @@ func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
 
 // Run initiates an extract job.
 func (e *Extractor) Run(ctx context.Context) (*Job, error) {
-	var printHeader *bool
-	if e.DisableHeader {
-		f := false
-		printHeader = &f
-	}
-	job := &bq.Job{
-		JobReference: createJobRef(e.JobID, e.AddJobIDSuffix, e.c.projectID),
-		Configuration: &bq.JobConfiguration{
-			Extract: &bq.JobConfigurationExtract{
-				DestinationUris:   append([]string{}, e.Dst.uris...),
-				Compression:       string(e.Dst.Compression),
-				DestinationFormat: string(e.Dst.DestinationFormat),
-				FieldDelimiter:    e.Dst.FieldDelimiter,
-				SourceTable:       e.Src.tableRefProto(),
-				PrintHeader:       printHeader,
-			},
-		},
-	}
-	return e.c.insertJob(ctx, &insertJobConf{job: job})
+	return e.c.insertJob(ctx, e.newJob(), nil)
 }
 
+func (e *Extractor) newJob() *bq.Job {
+	return &bq.Job{
+		JobReference:  e.JobIDConfig.createJobRef(e.c.projectID),
+		Configuration: e.ExtractConfig.toBQ(),
+	}
+}
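One subtlety worth calling out in the ExtractConfig round-trip above: the service-side PrintHeader is a *bool whose unset value means "print a header", so DisableHeader only sends an explicit false and reads back as "explicitly false". A standalone sketch of that mapping (not part of the diff):

package main

import "fmt"

// toPrintHeader mirrors ExtractConfig.toBQ: send an explicit false only
// when the header is disabled; otherwise omit the field and let the
// service default (print a header) apply.
func toPrintHeader(disableHeader bool) *bool {
	if disableHeader {
		f := false
		return &f
	}
	return nil
}

// fromPrintHeader mirrors the read side in bqToExtractConfig.
func fromPrintHeader(ph *bool) bool {
	return ph != nil && !*ph
}

func main() {
	fmt.Println(fromPrintHeader(toPrintHeader(true)))  // true
	fmt.Println(fromPrintHeader(toPrintHeader(false))) // false
}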
39 vendor/cloud.google.com/go/bigquery/extract_test.go generated vendored
@@ -17,7 +17,9 @@ package bigquery
 import (
 	"testing"
 
-	"golang.org/x/net/context"
+	"github.com/google/go-cmp/cmp"
+
+	"cloud.google.com/go/internal/testutil"
 
 	bq "google.golang.org/api/bigquery/v2"
 )
@@ -38,11 +40,15 @@ func defaultExtractJob() *bq.Job {
 	}
 }
 
+func defaultGCS() *GCSReference {
+	return &GCSReference{
+		URIs: []string{"uri"},
+	}
+}
+
 func TestExtract(t *testing.T) {
-	defer fixRandomJobID("RANDOM")()
-	s := &testService{}
+	defer fixRandomID("RANDOM")()
 	c := &Client{
-		service:   s,
 		projectID: "client-project-id",
 	}
 
@@ -58,11 +64,15 @@ func TestExtract(t *testing.T) {
 			want: defaultExtractJob(),
 		},
 		{
-			dst:    defaultGCS(),
-			src:    c.Dataset("dataset-id").Table("table-id"),
-			config: ExtractConfig{DisableHeader: true},
+			dst: defaultGCS(),
+			src: c.Dataset("dataset-id").Table("table-id"),
+			config: ExtractConfig{
+				DisableHeader: true,
+				Labels:        map[string]string{"a": "b"},
+			},
 			want: func() *bq.Job {
 				j := defaultExtractJob()
+				j.Configuration.Labels = map[string]string{"a": "b"}
 				f := false
 				j.Configuration.Extract.PrintHeader = &f
 				return j
@@ -92,10 +102,17 @@ func TestExtract(t *testing.T) {
 		tc.config.Src = ext.Src
 		tc.config.Dst = ext.Dst
 		ext.ExtractConfig = tc.config
-		if _, err := ext.Run(context.Background()); err != nil {
-			t.Errorf("#%d: err calling extract: %v", i, err)
-			continue
+		got := ext.newJob()
+		checkJob(t, i, got, tc.want)
+
+		jc, err := bqToJobConfig(got.Configuration, c)
+		if err != nil {
+			t.Fatalf("#%d: %v", i, err)
 		}
+		diff := testutil.Diff(jc, &ext.ExtractConfig,
+			cmp.AllowUnexported(Table{}, Client{}))
+		if diff != "" {
+			t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
+		}
-		checkJob(t, i, s.Job, tc.want)
 	}
 }
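A short, hypothetical usage sketch of the Extractor API these tests cover (dataset, table, and bucket names are invented; not part of the diff):

package main

import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcs := bigquery.NewGCSReference("gs://my-bucket/extract-*.csv")
	e := client.Dataset("my_dataset").Table("t").ExtractorTo(gcs)
	e.DisableHeader = true // maps to an explicit PrintHeader=false
	job, err := e.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = job // TODO: wait with job.Wait(ctx) and check the returned status.
}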
91 vendor/cloud.google.com/go/bigquery/file.go generated vendored
@@ -22,6 +22,10 @@ import (
 
 // A ReaderSource is a source for a load operation that gets
 // data from an io.Reader.
+//
+// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
+// its internal io.Reader will be nil, so it cannot be used for a
+// subsequent load operation.
 type ReaderSource struct {
 	r io.Reader
 	FileConfig
@@ -34,9 +38,9 @@ func NewReaderSource(r io.Reader) *ReaderSource {
 	return &ReaderSource{r: r}
 }
 
-func (r *ReaderSource) populateInsertJobConfForLoad(conf *insertJobConf) {
-	conf.media = r.r
-	r.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
+func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+	r.FileConfig.populateLoadConfig(lc)
+	return r.r
 }
 
 // FileConfig contains configuration options that pertain to files, typically
@@ -48,29 +52,10 @@ type FileConfig struct {
 	// Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
 	SourceFormat DataFormat
 
-	// FieldDelimiter is the separator for fields in a CSV file, used when
-	// reading or exporting data. The default is ",".
-	FieldDelimiter string
-
-	// The number of rows at the top of a CSV file that BigQuery will skip when
-	// reading data.
-	SkipLeadingRows int64
-
-	// AllowJaggedRows causes missing trailing optional columns to be tolerated
-	// when reading CSV data. Missing values are treated as nulls.
-	AllowJaggedRows bool
-
-	// AllowQuotedNewlines sets whether quoted data sections containing
-	// newlines are allowed when reading CSV data.
-	AllowQuotedNewlines bool
-
 	// Indicates if we should automatically infer the options and
 	// schema for CSV and JSON sources.
 	AutoDetect bool
 
-	// Encoding is the character encoding of data to be read.
-	Encoding Encoding
-
 	// MaxBadRecords is the maximum number of bad records that will be ignored
 	// when reading data.
 	MaxBadRecords int64
@@ -87,26 +72,8 @@ type FileConfig struct {
 	// unless the data is being loaded into a table that already exists.
 	Schema Schema
 
-	// Quote is the value used to quote data sections in a CSV file. The
-	// default quotation character is the double quote ("), which is used if
-	// both Quote and ForceZeroQuote are unset.
-	// To specify that no character should be interpreted as a quotation
-	// character, set ForceZeroQuote to true.
-	// Only used when reading data.
-	Quote          string
-	ForceZeroQuote bool
-}
-
-// quote returns the CSV quote character, or nil if unset.
-func (fc *FileConfig) quote() *string {
-	if fc.ForceZeroQuote {
-		quote := ""
-		return &quote
-	}
-	if fc.Quote == "" {
-		return nil
-	}
-	return &fc.Quote
+	// Additional options for CSV files.
+	CSVOptions
 }
 
 func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
@@ -120,47 +87,43 @@ func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
 	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
 	conf.MaxBadRecords = fc.MaxBadRecords
 	if fc.Schema != nil {
-		conf.Schema = fc.Schema.asTableSchema()
+		conf.Schema = fc.Schema.toBQ()
 	}
 	conf.Quote = fc.quote()
 }
 
+func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
+	fc.SourceFormat = DataFormat(conf.SourceFormat)
+	fc.AutoDetect = conf.Autodetect
+	fc.MaxBadRecords = conf.MaxBadRecords
+	fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
+	fc.Schema = bqToSchema(conf.Schema)
+	fc.SkipLeadingRows = conf.SkipLeadingRows
+	fc.AllowJaggedRows = conf.AllowJaggedRows
+	fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
+	fc.Encoding = Encoding(conf.Encoding)
+	fc.FieldDelimiter = conf.FieldDelimiter
+	fc.CSVOptions.setQuote(conf.Quote)
+}
+
 func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
 	format := fc.SourceFormat
 	if format == "" {
 		// Format must be explicitly set for external data sources.
 		format = CSV
 	}
 	// TODO(jba): support AutoDetect.
 	conf.Autodetect = fc.AutoDetect
 	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
 	conf.MaxBadRecords = fc.MaxBadRecords
 	conf.SourceFormat = string(format)
 	if fc.Schema != nil {
-		conf.Schema = fc.Schema.asTableSchema()
+		conf.Schema = fc.Schema.toBQ()
 	}
 	if format == CSV {
-		conf.CsvOptions = &bq.CsvOptions{
-			AllowJaggedRows:     fc.AllowJaggedRows,
-			AllowQuotedNewlines: fc.AllowQuotedNewlines,
-			Encoding:            string(fc.Encoding),
-			FieldDelimiter:      fc.FieldDelimiter,
-			SkipLeadingRows:     fc.SkipLeadingRows,
-			Quote:               fc.quote(),
-		}
+		fc.CSVOptions.populateExternalDataConfig(conf)
 	}
 }
 
-// DataFormat describes the format of BigQuery table data.
-type DataFormat string
-
-// Constants describing the format of BigQuery table data.
-const (
-	CSV             DataFormat = "CSV"
-	Avro            DataFormat = "AVRO"
-	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
-	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
-)
-
 // Encoding specifies the character encoding of data to be loaded into BigQuery.
 // See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
 // for more details about how this is used.
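A minimal, hypothetical sketch of loading from an io.Reader with the reworked populateLoadConfig path above (not part of the diff); note the caveat in the new doc comment that a ReaderSource recovered from Job.Config has a nil reader and cannot be reused:

package main

import (
	"strings"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id") // hypothetical project
	if err != nil {
		// TODO: Handle error.
	}
	rs := bigquery.NewReaderSource(strings.NewReader("name,num\na,1\nb,2\n"))
	rs.SourceFormat = bigquery.CSV
	rs.SkipLeadingRows = 1 // promoted from the CSVOptions now embedded in FileConfig
	loader := client.Dataset("my_dataset").Table("t").LoaderFrom(rs)
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = job // TODO: wait on the job.
}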
80 vendor/cloud.google.com/go/bigquery/file_test.go generated vendored
@@ -22,56 +22,36 @@ import (
 	bq "google.golang.org/api/bigquery/v2"
 )
 
-func TestQuote(t *testing.T) {
-	ptr := func(s string) *string { return &s }
-
-	for _, test := range []struct {
-		quote string
-		force bool
-		want  *string
-	}{
-		{"", false, nil},
-		{"", true, ptr("")},
-		{"-", false, ptr("-")},
-		{"-", true, ptr("")},
-	} {
-		fc := FileConfig{
-			Quote:          test.quote,
-			ForceZeroQuote: test.force,
-		}
-		got := fc.quote()
-		if (got == nil) != (test.want == nil) {
-			t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
-		}
-		if got != nil && test.want != nil && *got != *test.want {
-			t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
-		}
-	}
-}
-
-func TestPopulateLoadConfig(t *testing.T) {
-	hyphen := "-"
-	fc := FileConfig{
+var (
+	hyphen = "-"
+	fc     = FileConfig{
 		SourceFormat: CSV,
-		FieldDelimiter:      "\t",
-		SkipLeadingRows:     8,
-		AllowJaggedRows:     true,
-		AllowQuotedNewlines: true,
-		Encoding:            UTF_8,
 		AutoDetect:          true,
 		MaxBadRecords:       7,
 		IgnoreUnknownValues: true,
 		Schema: Schema{
 			stringFieldSchema(),
 			nestedFieldSchema(),
 		},
-		Quote: hyphen,
+		CSVOptions: CSVOptions{
+			Quote:               hyphen,
+			FieldDelimiter:      "\t",
+			SkipLeadingRows:     8,
+			AllowJaggedRows:     true,
+			AllowQuotedNewlines: true,
+			Encoding:            UTF_8,
+		},
 	}
+)
 
+func TestFileConfigPopulateLoadConfig(t *testing.T) {
 	want := &bq.JobConfigurationLoad{
 		SourceFormat:        "CSV",
 		FieldDelimiter:      "\t",
 		SkipLeadingRows:     8,
 		AllowJaggedRows:     true,
 		AllowQuotedNewlines: true,
 		Autodetect:          true,
 		Encoding:            "UTF-8",
 		MaxBadRecords:       7,
 		IgnoreUnknownValues: true,
@@ -88,3 +68,31 @@ func TestPopulateLoadConfig(t *testing.T) {
 		t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
 	}
 }
+
+func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
+	got := &bq.ExternalDataConfiguration{}
+	fc.populateExternalDataConfig(got)
+
+	want := &bq.ExternalDataConfiguration{
+		SourceFormat:        "CSV",
+		Autodetect:          true,
+		MaxBadRecords:       7,
+		IgnoreUnknownValues: true,
+		Schema: &bq.TableSchema{
+			Fields: []*bq.TableFieldSchema{
+				bqStringFieldSchema(),
+				bqNestedFieldSchema(),
+			}},
+		CsvOptions: &bq.CsvOptions{
+			AllowJaggedRows:     true,
+			AllowQuotedNewlines: true,
+			Encoding:            "UTF-8",
+			FieldDelimiter:      "\t",
+			Quote:               &hyphen,
+			SkipLeadingRows:     8,
+		},
+	}
+	if diff := testutil.Diff(got, want); diff != "" {
+		t.Errorf("got=-, want=+:\n%s", diff)
+	}
+}
23 vendor/cloud.google.com/go/bigquery/gcs.go generated vendored
@@ -14,13 +14,17 @@
 
 package bigquery
 
-import bq "google.golang.org/api/bigquery/v2"
+import (
+	"io"
+
+	bq "google.golang.org/api/bigquery/v2"
+)
 
 // GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
 // an input or output to a BigQuery operation.
 type GCSReference struct {
-	// TODO(jba): Export so that GCSReference can be used to hold data from a Job.get api call and expose it to the user.
-	uris []string
+	// URIs refer to Google Cloud Storage objects.
+	URIs []string
 
 	FileConfig
 
@@ -42,7 +46,7 @@ type GCSReference struct {
 // For more information about the treatment of wildcards and multiple URIs,
 // see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
 func NewGCSReference(uri ...string) *GCSReference {
-	return &GCSReference{uris: uri}
+	return &GCSReference{URIs: uri}
 }
 
 // Compression is the type of compression to apply when writing data to Google Cloud Storage.
@@ -53,15 +57,16 @@ const (
 	Gzip Compression = "GZIP"
 )
 
-func (gcs *GCSReference) populateInsertJobConfForLoad(conf *insertJobConf) {
-	conf.job.Configuration.Load.SourceUris = gcs.uris
-	gcs.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
+func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+	lc.SourceUris = gcs.URIs
+	gcs.FileConfig.populateLoadConfig(lc)
+	return nil
 }
 
-func (gcs *GCSReference) externalDataConfig() bq.ExternalDataConfiguration {
+func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
 	conf := bq.ExternalDataConfiguration{
 		Compression: string(gcs.Compression),
-		SourceUris:  append([]string{}, gcs.uris...),
+		SourceUris:  append([]string{}, gcs.URIs...),
 	}
 	gcs.FileConfig.populateExternalDataConfig(&conf)
 	return conf
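With uris now exported as URIs, a GCSReference can be built or inspected directly. A brief hypothetical sketch of the wildcard rule documented on NewGCSReference (bucket name invented; not part of the diff):

package main

import "cloud.google.com/go/bigquery"

func main() {
	// At most one '*' wildcard, and it must come after the bucket name.
	gcs := bigquery.NewGCSReference("gs://my-bucket/shard-*.csv")
	gcs.Compression = bigquery.Gzip
	_ = gcs.URIs // exported as of this change; previously the unexported uris
}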
391 vendor/cloud.google.com/go/bigquery/integration_test.go generated vendored
@@ -26,12 +26,15 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
 	gax "github.com/googleapis/gax-go"
 
 	"cloud.google.com/go/civil"
 	"cloud.google.com/go/internal"
 	"cloud.google.com/go/internal/pretty"
 	"cloud.google.com/go/internal/testutil"
+	"cloud.google.com/go/storage"
 	"golang.org/x/net/context"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
@@ -39,9 +42,10 @@ import (
 )
 
 var (
-	client  *Client
-	dataset *Dataset
-	schema  = Schema{
+	client        *Client
+	storageClient *storage.Client
+	dataset       *Dataset
+	schema        = Schema{
 		{Name: "name", Type: StringFieldType},
 		{Name: "nums", Type: IntegerFieldType, Repeated: true},
 		{Name: "rec", Type: RecordFieldType, Schema: Schema{
@@ -49,12 +53,15 @@ var (
 		}},
 	}
 	testTableExpiration time.Time
-	datasetIDs = testutil.NewUIDSpace("dataset")
+	datasetIDs = testutil.NewUIDSpaceSep("dataset", '_')
+	tableIDs   = testutil.NewUIDSpaceSep("table", '_')
 )
 
 func TestMain(m *testing.M) {
-	initIntegrationTest()
-	os.Exit(m.Run())
+	cleanup := initIntegrationTest()
+	r := m.Run()
+	cleanup()
+	os.Exit(r)
 }
 
 func getClient(t *testing.T) *Client {
@@ -65,16 +72,16 @@ func getClient(t *testing.T) *Client {
 }
 
 // If integration tests will be run, create a unique bucket for them.
-func initIntegrationTest() {
+func initIntegrationTest() func() {
 	flag.Parse() // needed for testing.Short()
 	if testing.Short() {
-		return
+		return func() {}
 	}
 	ctx := context.Background()
 	ts := testutil.TokenSource(ctx, Scope)
 	if ts == nil {
 		log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
-		return
+		return func() {}
 	}
 	projID := testutil.ProjID()
 	var err error
@@ -82,13 +89,41 @@ func initIntegrationTest() {
 	if err != nil {
 		log.Fatalf("NewClient: %v", err)
 	}
-	dataset = client.Dataset("bigquery_integration_test")
-	if err := dataset.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
-		log.Fatalf("creating dataset: %v", err)
+	storageClient, err = storage.NewClient(ctx,
+		option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl)))
+	if err != nil {
+		log.Fatalf("storage.NewClient: %v", err)
 	}
+	// BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
+	// with underscores.
+	dataset = client.Dataset(datasetIDs.New())
+	if err := dataset.Create(ctx, nil); err != nil {
+		log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err)
+	}
 	testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
+	return func() {
+		if err := deleteDataset(ctx, dataset); err != nil {
+			log.Printf("could not delete %s", dataset.DatasetID)
+		}
+	}
+}
+
+func deleteDataset(ctx context.Context, ds *Dataset) error {
+	it := ds.Tables(ctx)
+	for {
+		tbl, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		if err := tbl.Delete(ctx); err != nil {
+			return err
+		}
+	}
+	return ds.Delete(ctx)
+}
 
 func TestIntegration_TableCreate(t *testing.T) {
 	// Check that creating a record field with an empty schema is an error.
 	if client == nil {
@@ -197,8 +232,7 @@ func TestIntegration_DatasetCreate(t *testing.T) {
 		t.Skip("Integration tests skipped")
 	}
 	ctx := context.Background()
-	uid := strings.Replace(datasetIDs.New(), "-", "_", -1)
-	ds := client.Dataset(uid)
+	ds := client.Dataset(datasetIDs.New())
 	wmd := &DatasetMetadata{Name: "name", Location: "EU"}
 	err := ds.Create(ctx, wmd)
 	if err != nil {
@@ -251,12 +285,12 @@ func TestIntegration_DatasetDelete(t *testing.T) {
 		t.Skip("Integration tests skipped")
 	}
 	ctx := context.Background()
-	ds := client.Dataset("delete_test")
-	if err := ds.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
-		t.Fatalf("creating dataset %s: %v", ds, err)
+	ds := client.Dataset(datasetIDs.New())
+	if err := ds.Create(ctx, nil); err != nil {
+		t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
 	}
 	if err := ds.Delete(ctx); err != nil {
-		t.Fatalf("deleting dataset %s: %v", ds, err)
+		t.Fatalf("deleting dataset %s: %v", ds.DatasetID, err)
 	}
 }
 
@@ -340,6 +374,38 @@ func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
 	}
 }
 
+func TestIntegration_DatasetUpdateAccess(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctx := context.Background()
+	md, err := dataset.Metadata(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	origAccess := append([]*AccessEntry(nil), md.Access...)
+	newEntry := &AccessEntry{
+		Role:       ReaderRole,
+		Entity:     "Joe@example.com",
+		EntityType: UserEmailEntity,
+	}
+	newAccess := append(md.Access, newEntry)
+	dm := DatasetMetadataToUpdate{Access: newAccess}
+	md, err = dataset.Update(ctx, dm, md.ETag)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		_, err := dataset.Update(ctx, DatasetMetadataToUpdate{Access: origAccess}, md.ETag)
+		if err != nil {
+			t.Log("could not restore dataset access list")
+		}
+	}()
+	if diff := testutil.Diff(md.Access, newAccess); diff != "" {
+		t.Fatalf("got=-, want=+:\n%s", diff)
+	}
+}
+
 func TestIntegration_DatasetUpdateLabels(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
@@ -349,8 +415,6 @@ func TestIntegration_DatasetUpdateLabels(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	// TODO(jba): use a separate dataset for each test run so
-	// tests don't interfere with each other.
 	var dm DatasetMetadataToUpdate
 	dm.SetLabel("label", "value")
 	md, err = dataset.Update(ctx, dm, "")
@@ -371,6 +435,34 @@ func TestIntegration_DatasetUpdateLabels(t *testing.T) {
 	}
 }
 
+func TestIntegration_TableUpdateLabels(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctx := context.Background()
+	table := newTable(t, schema)
+	defer table.Delete(ctx)
+
+	var tm TableMetadataToUpdate
+	tm.SetLabel("label", "value")
+	md, err := table.Update(ctx, tm, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := md.Labels["label"], "value"; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+	tm = TableMetadataToUpdate{}
+	tm.DeleteLabel("label")
+	md, err = table.Update(ctx, tm, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, ok := md.Labels["label"]; ok {
+		t.Error("label still present after deletion")
+	}
+}
+
 func TestIntegration_Tables(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
@@ -450,7 +542,6 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 
 	// Query the table.
 	q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID))
-	q.UseStandardSQL = true
 	q.DefaultProjectID = dataset.ProjectID
 	q.DefaultDatasetID = dataset.DatasetID
 
@@ -465,11 +556,16 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if job1.LastStatus() == nil {
+		t.Error("no LastStatus")
+	}
 	job2, err := client.JobFromID(ctx, job1.ID())
 	if err != nil {
 		t.Fatal(err)
 	}
+	if job2.LastStatus() == nil {
+		t.Error("no LastStatus")
+	}
 	rit, err = job2.Read(ctx)
 	if err != nil {
 		t.Fatal(err)
@@ -574,10 +670,9 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 	defer table.Delete(ctx)
 
 	d := civil.Date{2016, 3, 20}
-	tm := civil.Time{15, 4, 5, 0}
-	ts := time.Date(2016, 3, 20, 15, 4, 5, 0, time.UTC)
+	tm := civil.Time{15, 4, 5, 6000}
+	ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC)
 	dtm := civil.DateTime{d, tm}
 
 	d2 := civil.Date{1994, 5, 15}
 	tm2 := civil.Time{1, 2, 4, 0}
 	ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)
@@ -660,12 +755,16 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 	}
 	sort.Sort(byName(got))
 
+	// Compare times to the microsecond.
+	timeEq := func(x, y time.Time) bool {
+		return x.Round(time.Microsecond).Equal(y.Round(time.Microsecond))
+	}
 	// BigQuery does not elide nils. It reports an error for nil fields.
 	for i, g := range got {
 		if i >= len(want) {
 			t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
-		} else if w := want[i]; !testutil.Equal(g, w) {
-			t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w))
+		} else if diff := testutil.Diff(g, want[i], cmp.Comparer(timeEq)); diff != "" {
+			t.Errorf("%d: got=-, want=+:\n%s", i, diff)
 		}
 	}
 }
@@ -814,14 +913,35 @@ func TestIntegration_Load(t *testing.T) {
 	rs := NewReaderSource(r)
 	loader := table.LoaderFrom(rs)
 	loader.WriteDisposition = WriteTruncate
+	loader.Labels = map[string]string{"test": "go"}
 	job, err := loader.Run(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
+	if job.LastStatus() == nil {
+		t.Error("no LastStatus")
+	}
+	conf, err := job.Config()
+	if err != nil {
+		t.Fatal(err)
+	}
+	config, ok := conf.(*LoadConfig)
+	if !ok {
+		t.Fatalf("got %T, want LoadConfig", conf)
+	}
+	diff := testutil.Diff(config, &loader.LoadConfig,
+		cmp.AllowUnexported(Table{}),
+		cmpopts.IgnoreUnexported(Client{}, ReaderSource{}),
+		// returned schema is at top level, not in the config
+		cmpopts.IgnoreFields(FileConfig{}, "Schema"))
+	if diff != "" {
+		t.Errorf("got=-, want=+:\n%s", diff)
+	}
 	if err := wait(ctx, job); err != nil {
 		t.Fatal(err)
 	}
 	checkRead(t, "reader load", table.Read(ctx), wantRows)
 
 }
 
 func TestIntegration_DML(t *testing.T) {
@@ -829,22 +949,30 @@ func TestIntegration_DML(t *testing.T) {
 		t.Skip("Integration tests skipped")
 	}
 	ctx := context.Background()
-	// Retry insert; sometimes it fails with INTERNAL.
-	err := internal.Retry(ctx, gax.Backoff{}, func() (bool, error) {
-		table := newTable(t, schema)
-		defer table.Delete(ctx)
+	table := newTable(t, schema)
+	defer table.Delete(ctx)
 
+	sql := fmt.Sprintf(`INSERT %s.%s (name, nums, rec)
+						VALUES ('a', [0], STRUCT<BOOL>(TRUE)),
+							   ('b', [1], STRUCT<BOOL>(FALSE)),
+							   ('c', [2], STRUCT<BOOL>(TRUE))`,
+		table.DatasetID, table.TableID)
+	if err := dmlInsert(ctx, sql); err != nil {
+		t.Fatal(err)
+	}
+	wantRows := [][]Value{
+		[]Value{"a", []Value{int64(0)}, []Value{true}},
+		[]Value{"b", []Value{int64(1)}, []Value{false}},
+		[]Value{"c", []Value{int64(2)}, []Value{true}},
+	}
+	checkRead(t, "DML", table.Read(ctx), wantRows)
+}
+
+func dmlInsert(ctx context.Context, sql string) error {
+	// Retry insert; sometimes it fails with INTERNAL.
+	return internal.Retry(ctx, gax.Backoff{}, func() (bool, error) {
-		// Use DML to insert.
-		wantRows := [][]Value{
-			[]Value{"a", []Value{int64(0)}, []Value{true}},
-			[]Value{"b", []Value{int64(1)}, []Value{false}},
-			[]Value{"c", []Value{int64(2)}, []Value{true}},
-		}
-		query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, nums, rec) "+
-			"VALUES ('a', [0], STRUCT<BOOL>(TRUE)), ('b', [1], STRUCT<BOOL>(FALSE)), ('c', [2], STRUCT<BOOL>(TRUE))",
-			table.TableID)
-		q := client.Query(query)
-		q.UseStandardSQL = true // necessary for DML
+		q := client.Query(sql)
 		job, err := q.Run(ctx)
 		if err != nil {
 			if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
@@ -853,18 +981,13 @@ func TestIntegration_DML(t *testing.T) {
 			return false, err
 		}
 		if err := wait(ctx, job); err != nil {
-			fmt.Printf("wait: %v\n", err)
+			if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
+				return true, err // fail on 4xx
+			}
 			return false, err
 		}
-		if msg, ok := compareRead(table.Read(ctx), wantRows); !ok {
-			// Stop on read error, because that has never been flaky.
-			return true, errors.New(msg)
-		}
 		return true, nil
 	})
-	if err != nil {
-		t.Fatal(err)
-	}
 }
 
 func TestIntegration_TimeTypes(t *testing.T) {
@@ -882,10 +1005,11 @@ func TestIntegration_TimeTypes(t *testing.T) {
 	defer table.Delete(ctx)
 
 	d := civil.Date{2016, 3, 20}
-	tm := civil.Time{12, 30, 0, 0}
+	tm := civil.Time{12, 30, 0, 6000}
 	dtm := civil.DateTime{d, tm}
 	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
 	wantRows := [][]Value{
-		[]Value{d, tm, civil.DateTime{d, tm}, ts},
+		[]Value{d, tm, dtm, ts},
 	}
 	upl := table.Uploader()
 	if err := upl.Put(ctx, []*ValuesSaver{
@@ -899,16 +1023,11 @@ func TestIntegration_TimeTypes(t *testing.T) {
 
 	// SQL wants DATETIMEs with a space between date and time, but the service
 	// returns them in RFC3339 form, with a "T" between.
-	query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt, ts) "+
-		"VALUES ('%s', '%s', '%s %s', '%s')",
-		table.TableID, d, tm, d, tm, ts.Format("2006-01-02 15:04:05"))
-	q := client.Query(query)
-	q.UseStandardSQL = true // necessary for DML
-	job, err := q.Run(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := wait(ctx, job); err != nil {
+	query := fmt.Sprintf("INSERT %s.%s (d, t, dt, ts) "+
+		"VALUES ('%s', '%s', '%s', '%s')",
+		table.DatasetID, table.TableID,
+		d, CivilTimeString(tm), CivilDateTimeString(dtm), ts.Format("2006-01-02 15:04:05"))
+	if err := dmlInsert(ctx, query); err != nil {
 		t.Fatal(err)
 	}
 	wantRows = append(wantRows, wantRows[0])
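The hunk above switches the DML insert to the package's CivilTimeString and CivilDateTimeString helpers, which render civil times (including the sub-second part) in the space-separated form SQL expects. A hedged sketch of that formatting (values invented; assumes these capitalized helpers are exported, as their use in the test implies):

package main

import (
	"fmt"

	"cloud.google.com/go/bigquery"
	"cloud.google.com/go/civil"
)

func main() {
	d := civil.Date{Year: 2016, Month: 3, Day: 20}
	tm := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6000}
	dtm := civil.DateTime{Date: d, Time: tm}
	// SQL DATETIME literals use a space between the date and time parts,
	// while the service returns RFC3339 with a 'T'.
	fmt.Printf("VALUES ('%s', '%s', '%s')\n",
		d, bigquery.CivilTimeString(tm), bigquery.CivilDateTimeString(dtm))
}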
@ -959,7 +1078,6 @@ func TestIntegration_StandardQuery(t *testing.T) {
|
|||
}
|
||||
for _, c := range testCases {
|
||||
q := client.Query(c.query)
|
||||
q.UseStandardSQL = true
|
||||
it, err := q.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -1045,7 +1163,14 @@ func TestIntegration_QueryParameters(t *testing.T) {
|
|||
for _, c := range testCases {
|
||||
q := client.Query(c.query)
|
||||
q.Parameters = c.parameters
|
||||
it, err := q.Read(ctx)
|
||||
job, err := q.Run(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if job.LastStatus() == nil {
|
||||
t.Error("no LastStatus")
|
||||
}
|
||||
it, err := job.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -1053,6 +1178,131 @@ func TestIntegration_QueryParameters(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIntegration_QueryDryRun(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
q := client.Query("SELECT word from " + stdName + " LIMIT 10")
|
||||
q.DryRun = true
|
||||
job, err := q.Run(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := job.LastStatus()
|
||||
if s.State != Done {
|
||||
t.Errorf("state is %v, expected Done", s.State)
|
||||
}
|
||||
if s.Statistics == nil {
|
||||
t.Fatal("no statistics")
|
||||
}
|
||||
if s.Statistics.Details.(*QueryStatistics).Schema == nil {
|
||||
t.Fatal("no schema")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_ExtractExternal(t *testing.T) {
|
||||
// Create a table, extract it to GCS, then query it externally.
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
schema := Schema{
|
||||
{Name: "name", Type: StringFieldType},
|
||||
{Name: "num", Type: IntegerFieldType},
|
||||
}
|
||||
table := newTable(t, schema)
|
||||
defer table.Delete(ctx)
|
||||
|
||||
// Insert table data.
|
||||
sql := fmt.Sprintf(`INSERT %s.%s (name, num)
|
||||
VALUES ('a', 1), ('b', 2), ('c', 3)`,
|
||||
table.DatasetID, table.TableID)
|
||||
if err := dmlInsert(ctx, sql); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Extract to a GCS object as CSV.
|
||||
bucketName := testutil.ProjID()
|
||||
objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
|
||||
uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
|
||||
defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
|
||||
gr := NewGCSReference(uri)
|
||||
gr.DestinationFormat = CSV
|
||||
e := table.ExtractorTo(gr)
|
||||
job, err := e.Run(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
conf, err := job.Config()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
config, ok := conf.(*ExtractConfig)
|
||||
if !ok {
|
||||
t.Fatalf("got %T, want ExtractConfig", conf)
|
||||
}
|
||||
diff := testutil.Diff(config, &e.ExtractConfig,
|
||||
cmp.AllowUnexported(Table{}),
|
||||
cmpopts.IgnoreUnexported(Client{}))
|
||||
if diff != "" {
|
||||
t.Errorf("got=-, want=+:\n%s", diff)
|
||||
}
|
||||
if err := wait(ctx, job); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
edc := &ExternalDataConfig{
|
||||
SourceFormat: CSV,
|
||||
SourceURIs: []string{uri},
|
||||
Schema: schema,
|
||||
Options: &CSVOptions{SkipLeadingRows: 1},
|
||||
}
|
||||
// Query that CSV file directly.
|
||||
q := client.Query("SELECT * FROM csv")
|
||||
q.TableDefinitions = map[string]ExternalData{"csv": edc}
|
||||
wantRows := [][]Value{
|
||||
[]Value{"a", int64(1)},
|
||||
[]Value{"b", int64(2)},
|
||||
[]Value{"c", int64(3)},
|
||||
}
|
||||
iter, err := q.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkRead(t, "external query", iter, wantRows)
|
||||
|
||||
// Make a table pointing to the file, and query it.
|
||||
// BigQuery does not allow a Table.Read on an external table.
|
||||
table = dataset.Table(tableIDs.New())
|
||||
err = table.Create(context.Background(), &TableMetadata{
|
||||
Schema: schema,
|
||||
ExpirationTime: testTableExpiration,
|
||||
ExternalDataConfig: edc,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
q = client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
|
||||
iter, err = q.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkRead(t, "external table", iter, wantRows)
|
||||
|
||||
// While we're here, check that the table metadata is correct.
|
||||
md, err := table.Metadata(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// One difference: since BigQuery returns the schema as part of the ordinary
|
||||
// table metadata, it does not populate ExternalDataConfig.Schema.
|
||||
md.ExternalDataConfig.Schema = md.Schema
|
||||
if diff := testutil.Diff(md.ExternalDataConfig, edc); diff != "" {
|
||||
t.Errorf("got=-, want=+\n%s", diff)
|
||||
}
|
||||
}

func TestIntegration_ReadNullIntoStruct(t *testing.T) {
// Reading a null into a struct field should return an error (not panic).
if client == nil {

@@ -1167,31 +1417,30 @@ func TestIntegration_ListJobs(t *testing.T) {
// About all we can do is list a few jobs.
const max = 20
var jis []JobInfo
var jobs []*Job
it := client.Jobs(ctx)
for {
ji, err := it.Next()
job, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatal(err)
}
jis = append(jis, ji)
if len(jis) >= max {
jobs = append(jobs, job)
if len(jobs) >= max {
break
}
}
// We expect that there is at least one job in the last few months.
if len(jis) == 0 {
if len(jobs) == 0 {
t.Fatal("did not get any jobs")
}
}
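
The listing change is visible in the loop above: Next now hands back a *Job directly instead of a JobInfo pair. A sketch of the new caller-side pattern, assuming ctx and client in scope:

    it := client.Jobs(ctx)
    for {
    	job, err := it.Next()
    	if err == iterator.Done {
    		break
    	}
    	if err != nil {
    		return err
    	}
    	// Status was captured at list time; LastStatus costs no extra RPC.
    	fmt.Println(job.ID(), job.LastStatus().State)
    }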

// Creates a new, temporary table with a unique name and the given schema.
func newTable(t *testing.T, s Schema) *Table {
name := fmt.Sprintf("t%d", time.Now().UnixNano())
table := dataset.Table(name)
table := dataset.Table(tableIDs.New())
err := table.Create(context.Background(), &TableMetadata{
Schema: s,
ExpirationTime: testTableExpiration,

@@ -1268,7 +1517,7 @@ func hasStatusCode(err error, code int) bool {
func wait(ctx context.Context, job *Job) error {
status, err := job.Wait(ctx)
if err != nil {
return fmt.Errorf("getting job status: %v", err)
return err
}
if status.Err() != nil {
return fmt.Errorf("job status error: %#v", status.Err())

94
vendor/cloud.google.com/go/bigquery/iterator.go
generated
vendored

@@ -19,20 +19,15 @@ import (
"reflect"

"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)

// A pageFetcher returns a page of rows, starting from the row specified by token.
type pageFetcher interface {
fetch(ctx context.Context, s service, token string) (*readDataResult, error)
setPaging(*pagingConf)
}

func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator {
func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
it := &RowIterator{
ctx: ctx,
service: s,
pf: pf,
ctx: ctx,
table: t,
pf: pf,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,

@@ -44,7 +39,7 @@ func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator
// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
ctx context.Context
service service
table *Table
pf pageFetcher
pageInfo *iterator.PageInfo
nextFunc func() error

@@ -135,16 +130,7 @@ func isStructPtr(x interface{}) bool {
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
pc := &pagingConf{}
if pageSize > 0 {
pc.recordsPerRequest = int64(pageSize)
pc.setRecordsPerRequest = true
}
if pageToken == "" {
pc.startIndex = it.StartIndex
}
it.pf.setPaging(pc)
res, err := it.pf.fetch(it.ctx, it.service, pageToken)
res, err := it.pf(it.ctx, it.table, it.schema, it.StartIndex, int64(pageSize), pageToken)
if err != nil {
return "", err
}

@@ -152,3 +138,69 @@ func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
it.schema = res.schema
return res.pageToken, nil
}
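
RowIterator.fetch above is the callback half of the google.golang.org/api/iterator contract: iterator.NewPageInfo couples a page-fetching function with a buffer-length and take-buffer pair, and the nextFunc it returns drives the paging. A self-contained sketch of the same wiring with a toy data source; the page-size fallback is an assumption for the demo, and the token is unused because the toy tracks its own position:

    package main

    import (
    	"fmt"

    	"google.golang.org/api/iterator"
    )

    type numIterator struct {
    	pageInfo *iterator.PageInfo
    	nextFunc func() error
    	items    []int
    	served   int // how many values the fake backend has produced
    }

    func newNumIterator() *numIterator {
    	it := &numIterator{}
    	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
    		it.fetch,
    		func() int { return len(it.items) },
    		func() interface{} { b := it.items; it.items = nil; return b })
    	return it
    }

    // fetch returns one page of up to pageSize values and a token for the next page.
    func (it *numIterator) fetch(pageSize int, pageToken string) (string, error) {
    	if pageSize <= 0 {
    		pageSize = 3 // demo default when the caller didn't set a page size
    	}
    	for i := 0; i < pageSize && it.served < 7; i++ {
    		it.items = append(it.items, it.served)
    		it.served++
    	}
    	if it.served >= 7 {
    		return "", nil // empty token: no more pages
    	}
    	return "more", nil
    }

    func (it *numIterator) Next() (int, error) {
    	if err := it.nextFunc(); err != nil {
    		return 0, err
    	}
    	n := it.items[0]
    	it.items = it.items[1:]
    	return n, nil
    }

    func main() {
    	it := newNumIterator()
    	for {
    		n, err := it.Next()
    		if err == iterator.Done {
    			break
    		}
    		if err != nil {
    			panic(err)
    		}
    		fmt.Println(n)
    	}
    }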

// A pageFetcher returns a page of rows from a destination table.
type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)

type fetchPageResult struct {
pageToken string
rows [][]Value
totalRows uint64
schema Schema
}

// fetchPage gets a page of rows from t.
func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
// Fetch the table schema in the background, if necessary.
errc := make(chan error, 1)
if schema != nil {
errc <- nil
} else {
go func() {
var bqt *bq.Table
err := runWithRetry(ctx, func() (err error) {
bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
Fields("schema").
Context(ctx).
Do()
return err
})
if err == nil && bqt.Schema != nil {
schema = bqToSchema(bqt.Schema)
}
errc <- err
}()
}
call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
setClientHeader(call.Header())
if pageToken != "" {
call.PageToken(pageToken)
} else {
call.StartIndex(startIndex)
}
if pageSize > 0 {
call.MaxResults(pageSize)
}
var res *bq.TableDataList
err := runWithRetry(ctx, func() (err error) {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
err = <-errc
if err != nil {
return nil, err
}
rows, err := convertRows(res.Rows, schema)
if err != nil {
return nil, err
}
return &fetchPageResult{
pageToken: res.PageToken,
rows: rows,
totalRows: uint64(res.TotalRows),
schema: schema,
}, nil
}
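
The errc dance in fetchPage is a small but deliberate pattern: a channel of capacity one lets the schema lookup run concurrently with Tabledata.List, and pre-loading a nil when the schema is already known makes the final receive unconditional. The shape in isolation, as a sketch with stand-in calls (slowLookup and mainCall are hypothetical):

    package main

    import (
    	"fmt"
    	"time"
    )

    func slowLookup() error { time.Sleep(10 * time.Millisecond); return nil } // stand-in for Tables.Get
    func mainCall() error   { return nil }                                    // stand-in for Tabledata.List

    func fetchWithSideLookup(haveSchema bool) error {
    	errc := make(chan error, 1) // capacity 1: the sender never blocks
    	if haveSchema {
    		errc <- nil // nothing to look up; pre-load a nil result
    	} else {
    		go func() { errc <- slowLookup() }()
    	}
    	if err := mainCall(); err != nil {
    		// Even on early return the goroutine can finish: the buffered
    		// channel absorbs its send, so nothing leaks or blocks.
    		return err
    	}
    	return <-errc // join the background lookup
    }

    func main() {
    	fmt.Println(fetchWithSideLookup(false), fetchWithSideLookup(true))
    }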

55
vendor/cloud.google.com/go/bigquery/iterator_test.go
generated
vendored

@@ -26,27 +26,24 @@ import (
)

type fetchResponse struct {
result *readDataResult // The result to return.
err error // The error to return.
result *fetchPageResult // The result to return.
err error // The error to return.
}

// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
fetchResponses map[string]fetchResponse

err error
err error
}

func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
call, ok := pf.fetchResponses[token]
func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
call, ok := pf.fetchResponses[pageToken]
if !ok {
pf.err = fmt.Errorf("Unexpected page token: %q", token)
pf.err = fmt.Errorf("Unexpected page token: %q", pageToken)
}
return call.result, call.err
}

func (pf *pageFetcherStub) setPaging(pc *pagingConf) {}

func TestIterator(t *testing.T) {
var (
iiSchema = Schema{

@@ -72,7 +69,7 @@ func TestIterator(t *testing.T) {
desc: "Iteration over single empty page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{},
schema: Schema{},

@@ -86,7 +83,7 @@ func TestIterator(t *testing.T) {
desc: "Iteration over single page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,

@@ -100,7 +97,7 @@ func TestIterator(t *testing.T) {
desc: "Iteration over single page with different schema",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{"1", 2}, {"11", 12}},
schema: siSchema,

@@ -114,14 +111,14 @@ func TestIterator(t *testing.T) {
desc: "Iteration over two pages",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
},
},
"a": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,

@@ -135,21 +132,21 @@ func TestIterator(t *testing.T) {
desc: "Server response includes empty page",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
},
},
"a": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{},
schema: iiSchema,
},
},
"b": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,

@@ -163,7 +160,7 @@ func TestIterator(t *testing.T) {
desc: "Fetch error",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,

@@ -173,7 +170,7 @@ func TestIterator(t *testing.T) {
// We return some data from this fetch, but also an error.
// So the end result should include only data from the previous fetch.
err: fetchFailure,
result: &readDataResult{
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,

@@ -190,14 +187,14 @@ func TestIterator(t *testing.T) {
pageToken: "a",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
},
},
"a": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,

@@ -213,21 +210,21 @@ func TestIterator(t *testing.T) {
pageToken: "b",
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "a",
rows: [][]Value{{1, 2}, {11, 12}},
schema: iiSchema,
},
},
"a": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "b",
rows: [][]Value{{101, 102}, {111, 112}},
schema: iiSchema,
},
},
"b": {
result: &readDataResult{},
result: &fetchPageResult{},
},
},
// In this test case, Next will return false on its first call,

@@ -241,7 +238,7 @@ func TestIterator(t *testing.T) {
pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses,
}
it := newRowIterator(context.Background(), nil, pf)
it := newRowIterator(context.Background(), nil, pf.fetchPage)
it.PageInfo().Token = tc.pageToken
values, schema, err := consumeRowIterator(it)
if err != tc.wantErr {

@@ -291,7 +288,7 @@ func TestNextDuringErrorState(t *testing.T) {
"": {err: errors.New("bang")},
},
}
it := newRowIterator(context.Background(), nil, pf)
it := newRowIterator(context.Background(), nil, pf.fetchPage)
var vals []Value
if err := it.Next(&vals); err == nil {
t.Errorf("Expected error after calling Next")

@@ -309,7 +306,7 @@ func TestNextAfterFinished(t *testing.T) {
{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{{1, 2}, {11, 12}},
},

@@ -320,7 +317,7 @@ func TestNextAfterFinished(t *testing.T) {
{
fetchResponses: map[string]fetchResponse{
"": {
result: &readDataResult{
result: &fetchPageResult{
pageToken: "",
rows: [][]Value{},
},

@@ -334,7 +331,7 @@ func TestNextAfterFinished(t *testing.T) {
pf := &pageFetcherStub{
fetchResponses: tc.fetchResponses,
}
it := newRowIterator(context.Background(), nil, pf)
it := newRowIterator(context.Background(), nil, pf.fetchPage)

values, _, err := consumeRowIterator(it)
if err != nil {

377
vendor/cloud.google.com/go/bigquery/job.go
generated
vendored

@@ -26,6 +26,7 @@ import (
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
)

@@ -35,20 +36,19 @@ type Job struct {
projectID string
jobID string

isQuery bool
destinationTable *bq.TableReference // table to read query results from
config *bq.JobConfiguration
lastStatus *JobStatus
}

// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
job, err := c.service.getJob(ctx, c.projectID, id)
bqjob, err := c.getJobInternal(ctx, id, "configuration", "jobReference", "status", "statistics")
if err != nil {
return nil, err
}
job.c = c
return job, nil
return bqToJob(bqjob, c)
}

// ID returns the job's ID.

@@ -80,20 +80,62 @@ type JobStatus struct {
Statistics *JobStatistics
}

// JobConfig contains configuration information for a job. It is implemented by
// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type JobConfig interface {
isJobConfig()
}

func (*CopyConfig) isJobConfig() {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig() {}
func (*QueryConfig) isJobConfig() {}

// Config returns the configuration information for j.
func (j *Job) Config() (JobConfig, error) {
return bqToJobConfig(j.config, j.c)
}

func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
switch {
case q == nil:
return nil, nil
case q.Copy != nil:
return bqToCopyConfig(q, c), nil
case q.Extract != nil:
return bqToExtractConfig(q, c), nil
case q.Load != nil:
return bqToLoadConfig(q, c), nil
case q.Query != nil:
return bqToQueryConfig(q, c)
default:
return nil, nil
}
}
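
JobConfig is a sealed interface: the unexported isJobConfig marker means only the four configuration types above can implement it, so callers recover the concrete type with a type switch. A usage sketch, assuming a *Job in hand (field names as defined elsewhere in this package):

    conf, err := job.Config()
    if err != nil {
    	return err
    }
    switch c := conf.(type) {
    case *QueryConfig:
    	fmt.Println("query:", c.Q)
    case *LoadConfig:
    	fmt.Println("load into:", c.Dst.TableID)
    case *ExtractConfig:
    	fmt.Println("extract from:", c.Src.TableID)
    case *CopyConfig:
    	fmt.Println("copy job")
    default:
    	// conf is nil for jobs with no configuration.
    }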

// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string

// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
}

// createJobRef creates a JobReference.
// projectID must be non-empty.
func createJobRef(jobID string, addJobIDSuffix bool, projectID string) *bq.JobReference {
if jobID == "" {
jobID = randomJobIDFn()
} else if addJobIDSuffix {
jobID += "-" + randomJobIDFn()
}
func (j *JobIDConfig) createJobRef(projectID string) *bq.JobReference {
// We don't check whether projectID is empty; the server will return an
// error when it encounters the resulting JobReference.
return &bq.JobReference{
JobId: jobID,
ProjectId: projectID,
jr := &bq.JobReference{ProjectId: projectID}
if j.JobID == "" {
jr.JobId = randomIDFn()
} else if j.AddJobIDSuffix {
jr.JobId = j.JobID + "-" + randomIDFn()
} else {
jr.JobId = j.JobID
}
return jr
}
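
Put concretely, with the test seam from job_test.go (later in this diff) pinning randomIDFn to "RANDOM", the three branches above produce the following, sketched from inside the package:

    defer fixRandomID("RANDOM")() // test seam defined in job_test.go

    jr := (&JobIDConfig{}).createJobRef("proj")                                  // JobId: "RANDOM"
    jr = (&JobIDConfig{JobID: "foo"}).createJobRef("proj")                       // JobId: "foo"
    jr = (&JobIDConfig{JobID: "foo", AddJobIDSuffix: true}).createJobRef("proj") // JobId: "foo-RANDOM"
    _ = jr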

const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

@@ -104,11 +146,15 @@ var (
)

// For testing.
var randomJobIDFn = randomJobID
var randomIDFn = randomID

func randomJobID() string {
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for suffixes.
var b [27]byte
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
// suffixes.
const randomIDLen = 27

func randomID() string {
// This is used for both job IDs and insert IDs.
var b [randomIDLen]byte
rngMu.Lock()
for i := 0; i < len(b); i++ {
b[i] = alphanum[rng.Intn(len(alphanum))]

@@ -128,33 +174,43 @@ func (s *JobStatus) Err() error {
return s.err
}

// Fill in the client field of Tables in the statistics.
func (s *JobStatus) setClient(c *Client) {
if s.Statistics == nil {
return
}
if qs, ok := s.Statistics.Details.(*QueryStatistics); ok {
for _, t := range qs.ReferencedTables {
t.c = c
}
}
}

// Status returns the current status of the job. It fails if the Status could not be determined.
// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
js, err := j.c.service.jobStatus(ctx, j.projectID, j.jobID)
bqjob, err := j.c.getJobInternal(ctx, j.jobID, "status", "statistics")
if err != nil {
return nil, err
}
js.setClient(j.c)
return js, nil
if err := j.setStatus(bqjob.Status); err != nil {
return nil, err
}
j.setStatistics(bqjob.Statistics, j.c)
return j.lastStatus, nil
}

// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
func (j *Job) LastStatus() *JobStatus {
return j.lastStatus
}

// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
return j.c.service.jobCancel(ctx, j.projectID, j.jobID)
// Jobs.Cancel returns a job entity, but the only relevant piece of
// data it may contain (the status of the job) is unreliable. From the
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
Fields(). // We don't need any of the response data.
Context(ctx)
setClientHeader(call.Header())
return runWithRetry(ctx, func() error {
_, err := call.Do()
return err
})
}

// Wait blocks until the job or the context is done. It returns the final status

@@ -163,9 +219,9 @@ func (j *Job) Cancel(ctx context.Context) error {
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
if j.isQuery {
if j.isQuery() {
// We can avoid polling for query jobs.
if _, err := j.c.service.waitForQuery(ctx, j.projectID, j.jobID); err != nil {
if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
return nil, err
}
// Note: extra RPC even if you just want to wait for the query to finish.

@@ -196,30 +252,54 @@ func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
if !j.isQuery {
return j.read(ctx, j.waitForQuery, fetchPage)
}

func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) {
if !j.isQuery() {
return nil, errors.New("bigquery: cannot read from a non-query job")
}
var projectID string
if j.destinationTable != nil {
projectID = j.destinationTable.ProjectId
} else {
projectID = j.c.projectID
destTable := j.config.Query.DestinationTable
// The destination table should only be nil if there was a query error.
if destTable == nil {
return nil, errors.New("bigquery: query job missing destination table")
}

schema, err := j.c.service.waitForQuery(ctx, projectID, j.jobID)
projectID := destTable.ProjectId
schema, err := waitForQuery(ctx, projectID)
if err != nil {
return nil, err
}
// The destination table should only be nil if there was a query error.
if j.destinationTable == nil {
return nil, errors.New("bigquery: query job missing destination table")
dt := bqToTable(destTable, j.c)
it := newRowIterator(ctx, dt, pf)
it.schema = schema
return it, nil
}

// waitForQuery waits for the query job to complete and returns its schema.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
// Use GetQueryResults only to wait for completion, not to read results.
call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Context(ctx).MaxResults(0)
setClientHeader(call.Header())
backoff := gax.Backoff{
Initial: 1 * time.Second,
Multiplier: 2,
Max: 60 * time.Second,
}
return newRowIterator(ctx, j.c.service, &readTableConf{
projectID: j.destinationTable.ProjectId,
datasetID: j.destinationTable.DatasetId,
tableID: j.destinationTable.TableId,
schema: schema,
}), nil
var res *bq.GetQueryResultsResponse
err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
res, err = call.Do()
if err != nil {
return !retryableError(err), err
}
if !res.JobComplete { // GetQueryResults may return early without error; retry.
return false, nil
}
return true, nil
})
if err != nil {
return nil, err
}
return bqToSchema(res.Schema), nil
}
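
waitForQuery layers two helpers: gax.Backoff supplies growing sleep intervals, and internal.Retry (cloud.google.com/go/internal) re-invokes the closure until it reports stop or the context ends. The skeleton of that poll loop, with poll and isRetryable as stand-ins for the BigQuery-specific pieces:

    backoff := gax.Backoff{Initial: 1 * time.Second, Multiplier: 2, Max: 60 * time.Second}
    err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
    	done, err := poll() // e.g. GetQueryResults with MaxResults(0)
    	if err != nil {
    		return !isRetryable(err), err // permanent errors stop the loop
    	}
    	return done, nil // done == false: sleep per backoff, then try again
    })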

// JobStatistics contains statistics about a job.

@@ -373,12 +453,6 @@ func (c *Client) Jobs(ctx context.Context) *JobIterator {
return it
}

// A JobInfo consists of a Job and a JobStatus.
type JobInfo struct {
Job *Job
Status *JobStatus
}

// JobIterator iterates over jobs in a project.
type JobIterator struct {
ProjectID string // Project ID of the jobs to list. Default is the client's project.

@@ -389,14 +463,14 @@ type JobIterator struct {
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []JobInfo
items []*Job
}

func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *JobIterator) Next() (JobInfo, error) {
func (it *JobIterator) Next() (*Job, error) {
if err := it.nextFunc(); err != nil {
return JobInfo{}, err
return nil, err
}
item := it.items[0]
it.items = it.items[1:]

@@ -417,14 +491,179 @@ func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
default:
return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
}
jobInfos, nextPageToken, err := it.c.service.listJobs(it.ctx, it.ProjectID, pageSize, pageToken, it.AllUsers, st)

req := it.c.bqs.Jobs.List(it.ProjectID).
Context(it.ctx).
PageToken(pageToken).
Projection("full").
AllUsers(it.AllUsers)
if st != "" {
req.StateFilter(st)
}
setClientHeader(req.Header())
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
res, err := req.Do()
if err != nil {
return "", err
}
for _, ji := range jobInfos {
ji.Job.c = it.c
ji.Status.setClient(it.c)
it.items = append(it.items, ji)
for _, j := range res.Jobs {
job, err := convertListedJob(j, it.c)
if err != nil {
return "", err
}
it.items = append(it.items, job)
}
return nextPageToken, nil
return res.NextPageToken, nil
}

func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
}

func (c *Client) getJobInternal(ctx context.Context, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
var job *bq.Job
call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
if len(fields) > 0 {
call = call.Fields(fields...)
}
setClientHeader(call.Header())
err := runWithRetry(ctx, func() (err error) {
job, err = call.Do()
return err
})
if err != nil {
return nil, err
}
return job, nil
}

func bqToJob(q *bq.Job, c *Client) (*Job, error) {
return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c)
}

func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) {
j := &Job{
projectID: qr.ProjectId,
jobID: qr.JobId,
c: c,
}
j.setConfig(qc)
if err := j.setStatus(qs); err != nil {
return nil, err
}
j.setStatistics(qt, c)
return j, nil
}

func (j *Job) setConfig(config *bq.JobConfiguration) {
if config == nil {
return
}
j.config = config
}

func (j *Job) isQuery() bool {
return j.config != nil && j.config.Query != nil
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func (j *Job) setStatus(qs *bq.JobStatus) error {
if qs == nil {
return nil
}
state, ok := stateMap[qs.State]
if !ok {
return fmt.Errorf("unexpected job state: %v", qs.State)
}
j.lastStatus = &JobStatus{
State: state,
err: nil,
}
if err := bqToError(qs.ErrorResult); state == Done && err != nil {
j.lastStatus.err = err
}
for _, ep := range qs.Errors {
j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
}
return nil
}

func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
if s == nil || j.lastStatus == nil {
return
}
js := &JobStatistics{
CreationTime: unixMillisToTime(s.CreationTime),
StartTime: unixMillisToTime(s.StartTime),
EndTime: unixMillisToTime(s.EndTime),
TotalBytesProcessed: s.TotalBytesProcessed,
}
switch {
case s.Extract != nil:
js.Details = &ExtractStatistics{
DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
}
case s.Load != nil:
js.Details = &LoadStatistics{
InputFileBytes: s.Load.InputFileBytes,
InputFiles: s.Load.InputFiles,
OutputBytes: s.Load.OutputBytes,
OutputRows: s.Load.OutputRows,
}
case s.Query != nil:
var names []string
for _, qp := range s.Query.UndeclaredQueryParameters {
names = append(names, qp.Name)
}
var tables []*Table
for _, tr := range s.Query.ReferencedTables {
tables = append(tables, bqToTable(tr, c))
}
js.Details = &QueryStatistics{
BillingTier: s.Query.BillingTier,
CacheHit: s.Query.CacheHit,
StatementType: s.Query.StatementType,
TotalBytesBilled: s.Query.TotalBytesBilled,
TotalBytesProcessed: s.Query.TotalBytesProcessed,
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
Schema: bqToSchema(s.Query.Schema),
ReferencedTables: tables,
UndeclaredQueryParameterNames: names,
}
}
j.lastStatus.Statistics = js
}

func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
var res []*ExplainQueryStage
for _, s := range stages {
var steps []*ExplainQueryStep
for _, p := range s.Steps {
steps = append(steps, &ExplainQueryStep{
Kind: p.Kind,
Substeps: p.Substeps,
})
}
res = append(res, &ExplainQueryStage{
ComputeRatioAvg: s.ComputeRatioAvg,
ComputeRatioMax: s.ComputeRatioMax,
ID: s.Id,
Name: s.Name,
ReadRatioAvg: s.ReadRatioAvg,
ReadRatioMax: s.ReadRatioMax,
RecordsRead: s.RecordsRead,
RecordsWritten: s.RecordsWritten,
Status: s.Status,
Steps: steps,
WaitRatioAvg: s.WaitRatioAvg,
WaitRatioMax: s.WaitRatioMax,
WriteRatioAvg: s.WriteRatioAvg,
WriteRatioMax: s.WriteRatioMax,
})
}
return res
}

29
vendor/cloud.google.com/go/bigquery/job_test.go
generated
vendored

@@ -18,12 +18,11 @@ import (
"testing"

"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)

func TestCreateJobRef(t *testing.T) {
defer fixRandomJobID("RANDOM")()
defer fixRandomID("RANDOM")()
for _, test := range []struct {
jobID string
addJobIDSuffix bool

@@ -50,7 +49,8 @@ func TestCreateJobRef(t *testing.T) {
want: "foo-RANDOM",
},
} {
jr := createJobRef(test.jobID, test.addJobIDSuffix, "projectID")
jc := JobIDConfig{JobID: test.jobID, AddJobIDSuffix: test.addJobIDSuffix}
jr := jc.createJobRef("projectID")
got := jr.JobId
if got != test.want {
t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want)

@@ -58,10 +58,10 @@ func TestCreateJobRef(t *testing.T) {
}
}

func fixRandomJobID(s string) func() {
prev := randomJobIDFn
randomJobIDFn = func() string { return s }
return func() { randomJobIDFn = prev }
func fixRandomID(s string) func() {
prev := randomIDFn
randomIDFn = func() string { return s }
return func() { randomIDFn = prev }
}
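
fixRandomID is the standard Go seam for pinning nondeterminism in tests: swap the package-level function variable, and hand back a closure that restores it, so one `defer fixRandomID("RANDOM")()` both installs the fake and schedules the undo. The same pattern for any injected dependency, as a generic sketch (nowFn and fixNow are illustrative names):

    var nowFn = time.Now // production code calls nowFn, not time.Now

    func fixNow(fake time.Time) func() {
    	prev := nowFn
    	nowFn = func() time.Time { return fake }
    	return func() { nowFn = prev } // restores the real clock
    }

    // In a test:
    //	defer fixNow(time.Unix(0, 0))()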

func checkJob(t *testing.T, i int, got, want *bq.Job) {

@@ -78,18 +78,3 @@ func checkJob(t *testing.T, i int, got, want *bq.Job) {
t.Errorf("#%d: (got=-, want=+) %s", i, d)
}
}

type testService struct {
*bq.Job

service
}

func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
s.Job = conf.job
return &Job{}, nil
}

func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
return &JobStatus{State: Done}, nil
}

77
vendor/cloud.google.com/go/bigquery/load.go
generated
vendored

@@ -15,18 +15,14 @@
package bigquery

import (
"io"

"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)

// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string

// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool

// Src is the source from which data will be loaded.
Src LoadSource

@@ -40,10 +36,53 @@ type LoadConfig struct {
// WriteDisposition specifies how existing data in the destination table is treated.
// The default is WriteAppend.
WriteDisposition TableWriteDisposition

// The labels associated with this job.
Labels map[string]string

// If non-nil, the destination table is partitioned by time.
TimePartitioning *TimePartitioning
}

func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
config := &bq.JobConfiguration{
Labels: l.Labels,
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition),
WriteDisposition: string(l.WriteDisposition),
DestinationTable: l.Dst.toBQ(),
TimePartitioning: l.TimePartitioning.toBQ(),
},
}
media := l.Src.populateLoadConfig(config.Load)
return config, media
}

func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
lc := &LoadConfig{
Labels: q.Labels,
CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition),
Dst: bqToTable(q.Load.DestinationTable, c),
TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning),
}
var fc *FileConfig
if len(q.Load.SourceUris) == 0 {
s := NewReaderSource(nil)
fc = &s.FileConfig
lc.Src = s
} else {
s := NewGCSReference(q.Load.SourceUris...)
fc = &s.FileConfig
lc.Src = s
}
bqPopulateFileConfig(q.Load, fc)
return lc
}

// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
JobIDConfig
LoadConfig
c *Client
}

@@ -54,7 +93,8 @@ type Loader struct {
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
populateInsertJobConfForLoad(conf *insertJobConf)
// populates config, returns media
populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
}

// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.

@@ -73,17 +113,14 @@ func (t *Table) LoaderFrom(src LoadSource) *Loader {

// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) {
job := &bq.Job{
JobReference: createJobRef(l.JobID, l.AddJobIDSuffix, l.c.projectID),
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition),
WriteDisposition: string(l.WriteDisposition),
},
},
}
conf := &insertJobConf{job: job}
l.Src.populateInsertJobConfForLoad(conf)
job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()
return l.c.insertJob(ctx, conf)
job, media := l.newJob()
return l.c.insertJob(ctx, job, media)
}

func (l *Loader) newJob() (*bq.Job, io.Reader) {
config, media := l.LoadConfig.toBQ()
return &bq.Job{
JobReference: l.JobIDConfig.createJobRef(l.c.projectID),
Configuration: config,
}, media
}
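
After the refactor, a load is configured entirely through the embedded LoadConfig and JobIDConfig, and newJob assembles the wire-format job at Run time. A caller-side sketch, assuming ctx, a client, and an existing GCS object (the URIs and names are placeholders):

    gcs := NewGCSReference("gs://some-bucket/data.csv") // hypothetical object
    gcs.SkipLeadingRows = 1
    loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcs)
    loader.WriteDisposition = WriteTruncate
    loader.JobID = "my-load" // optional; a random ID is generated when empty
    job, err := loader.Run(ctx)
    if err != nil {
    	return err
    }
    status, err := job.Wait(ctx)
    if err != nil {
    	return err
    }
    return status.Err() // nil on success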

38
vendor/cloud.google.com/go/bigquery/load_test.go
generated
vendored

@@ -17,8 +17,11 @@ package bigquery
import (
"strings"
"testing"
"time"

"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"

bq "google.golang.org/api/bigquery/v2"
)

@@ -67,12 +70,13 @@ func bqNestedFieldSchema() *bq.TableFieldSchema {
}

func TestLoad(t *testing.T) {
defer fixRandomJobID("RANDOM")()
defer fixRandomID("RANDOM")()
c := &Client{projectID: "client-project-id"}

testCases := []struct {
dst *Table
src LoadSource
jobID string
config LoadConfig
want *bq.Job
}{

@@ -82,17 +86,24 @@ func TestLoad(t *testing.T) {
want: defaultLoadJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
dst: c.Dataset("dataset-id").Table("table-id"),
jobID: "ajob",
config: LoadConfig{
CreateDisposition: CreateNever,
WriteDisposition: WriteTruncate,
JobID: "ajob",
Labels: map[string]string{"a": "b"},
TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond},
},
src: NewGCSReference("uri"),
want: func() *bq.Job {
j := defaultLoadJob()
j.Configuration.Labels = map[string]string{"a": "b"}
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1234,
}
j.JobReference = &bq.JobReference{
JobId: "ajob",
ProjectId: "client-project-id",

@@ -211,16 +222,23 @@ func TestLoad(t *testing.T) {
}

for i, tc := range testCases {
s := &testService{}
c.service = s
loader := tc.dst.LoaderFrom(tc.src)
loader.JobID = tc.jobID
tc.config.Src = tc.src
tc.config.Dst = tc.dst
loader.LoadConfig = tc.config
if _, err := loader.Run(context.Background()); err != nil {
t.Errorf("#%d: err calling Loader.Run: %v", i, err)
continue
got, _ := loader.newJob()
checkJob(t, i, got, tc.want)

jc, err := bqToJobConfig(got.Configuration, c)
if err != nil {
t.Fatalf("#%d: %v", i, err)
}
diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig,
cmp.AllowUnexported(Table{}, Client{}),
cmpopts.IgnoreUnexported(ReaderSource{}))
if diff != "" {
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
}
checkJob(t, i, s.Job, tc.want)
}
}

103
vendor/cloud.google.com/go/bigquery/params.go
generated
vendored

@@ -20,6 +20,7 @@ import (
"fmt"
"reflect"
"regexp"
"strings"
"time"

"cloud.google.com/go/civil"

@@ -77,8 +78,9 @@ type QueryParameter struct {
Name string

// Value is the value of the parameter.
// The following Go types are supported, with their corresponding
// Bigquery types:
//
// When you create a QueryParameter to send to BigQuery, the following Go types
// are supported, with their corresponding Bigquery types:
// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
// Note that uint, uint64 and uintptr are not supported, because
// they may contain values that cannot fit into a 64-bit signed integer.

@@ -89,10 +91,17 @@ type QueryParameter struct {
// time.Time: TIMESTAMP
// Arrays and slices of the above.
// Structs of the above. Only the exported fields are used.
//
// When a QueryParameter is returned inside a QueryConfig from a call to
// Job.Config:
// Integers are of type int64.
// Floating-point values are of type float64.
// Arrays are of type []interface{}, regardless of the array element type.
// Structs are of type map[string]interface{}.
Value interface{}
}
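
The in/out asymmetry documented above is easy to trip over: a struct parameter goes in as its Go type but comes back from Job.Config as a map. A sketch, assuming ctx and client in scope (the type S and the query text are illustrative):

    type S struct {
    	A int64
    	B string
    }

    q := client.Query("SELECT @s")
    q.Parameters = []QueryParameter{{Name: "s", Value: S{A: 1, B: "x"}}}
    job, err := q.Run(ctx)
    if err != nil {
    	return err
    }
    conf, err := job.Config()
    if err != nil {
    	return err
    }
    p := conf.(*QueryConfig).Parameters[0]
    // p.Value is map[string]interface{}{"A": int64(1), "B": "x"}, not an S.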

func (p QueryParameter) toRaw() (*bq.QueryParameter, error) {
func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
pv, err := paramValue(reflect.ValueOf(p.Value))
if err != nil {
return nil, err

@@ -189,12 +198,11 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {

case typeOfTime:
// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
res.Value = civilTimeParamString(v.Interface().(civil.Time))
res.Value = CivilTimeString(v.Interface().(civil.Time))
return res, nil

case typeOfDateTime:
dt := v.Interface().(civil.DateTime)
res.Value = dt.Date.String() + " " + civilTimeParamString(dt.Time)
res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
return res, nil

case typeOfGoTime:

@@ -254,12 +262,81 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
return res, nil
}

func civilTimeParamString(t civil.Time) string {
if t.Nanosecond == 0 {
return t.String()
} else {
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
t.Nanosecond = 0
return t.String() + fmt.Sprintf(".%06d", micro)
func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
p := QueryParameter{Name: q.Name}
val, err := convertParamValue(q.ParameterValue, q.ParameterType)
if err != nil {
return QueryParameter{}, err
}
p.Value = val
return p, nil
}

var paramTypeToFieldType = map[string]FieldType{
int64ParamType.Type: IntegerFieldType,
float64ParamType.Type: FloatFieldType,
boolParamType.Type: BooleanFieldType,
stringParamType.Type: StringFieldType,
bytesParamType.Type: BytesFieldType,
dateParamType.Type: DateFieldType,
timeParamType.Type: TimeFieldType,
}

// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
switch qtype.Type {
case "ARRAY":
if qval == nil {
return []interface{}(nil), nil
}
return convertParamArray(qval.ArrayValues, qtype.ArrayType)
case "STRUCT":
if qval == nil {
return map[string]interface{}(nil), nil
}
return convertParamStruct(qval.StructValues, qtype.StructTypes)
case "TIMESTAMP":
return time.Parse(timestampFormat, qval.Value)
case "DATETIME":
parts := strings.Fields(qval.Value)
if len(parts) != 2 {
return nil, fmt.Errorf("bigquery: bad DATETIME value %q", qval.Value)
}
return civil.ParseDateTime(parts[0] + "T" + parts[1])
default:
return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
}
}

// convertParamArray converts a query parameter array value to a Go value. It
// always returns a []interface{}.
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
var vals []interface{}
for _, el := range elVals {
val, err := convertParamValue(el, elType)
if err != nil {
return nil, err
}
vals = append(vals, val)
}
return vals, nil
}

// convertParamStruct converts a query parameter struct value into a Go value. It
// always returns a map[string]interface{}.
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
vals := map[string]interface{}{}
for _, st := range sTypes {
if sv, ok := sVals[st.Name]; ok {
val, err := convertParamValue(&sv, st.Type)
if err != nil {
return nil, err
}
vals[st.Name] = val
} else {
vals[st.Name] = nil
}
}
return vals, nil
}
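
The three converters recurse together: ARRAY hands each element back to convertParamValue, STRUCT does the same per field, and scalars bottom out in convertBasicType. Tracing an ARRAY<STRUCT<D STRING>> value through them, with values shaped like this diff's test fixtures:

    ptype := &bq.QueryParameterType{
    	Type: "ARRAY",
    	ArrayType: &bq.QueryParameterType{
    		Type:        "STRUCT",
    		StructTypes: []*bq.QueryParameterTypeStructTypes{{Name: "D", Type: stringParamType}},
    	},
    }
    pval := &bq.QueryParameterValue{
    	ArrayValues: []*bq.QueryParameterValue{
    		{StructValues: map[string]bq.QueryParameterValue{"D": {Value: "s"}}},
    	},
    }
    got, err := convertParamValue(pval, ptype)
    // err == nil; got == []interface{}{map[string]interface{}{"D": "s"}}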

287
vendor/cloud.google.com/go/bigquery/params_test.go
generated
vendored

@@ -30,40 +30,79 @@ import (
)

var scalarTests = []struct {
val interface{}
want string
val interface{} // The Go value
wantVal string // paramValue's desired output
wantType *bq.QueryParameterType // paramType's desired output
}{
{int64(0), "0"},
{3.14, "3.14"},
{3.14159e-87, "3.14159e-87"},
{true, "true"},
{"string", "string"},
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n"},
{math.NaN(), "NaN"},
{[]byte("foo"), "Zm9v"}, // base64 encoding of "foo"
{int64(0), "0", int64ParamType},
{3.14, "3.14", float64ParamType},
{3.14159e-87, "3.14159e-87", float64ParamType},
{true, "true", boolParamType},
{"string", "string", stringParamType},
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
{math.NaN(), "NaN", float64ParamType},
{[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
"2016-03-20 04:22:09.000005-01:02"},
{civil.Date{2016, 3, 20}, "2016-03-20"},
{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000"},
{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, "2016-03-20 04:05:06.789000"},
"2016-03-20 04:22:09.000005-01:02",
timestampParamType},
{civil.Date{2016, 3, 20}, "2016-03-20", dateParamType},
{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000", timeParamType},
{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}},
"2016-03-20 04:05:06.789000",
dateTimeParamType},
}

type S1 struct {
A int
B *S2
C bool
}
type (
S1 struct {
A int
B *S2
C bool
}
S2 struct {
D string
e int
}
)

type S2 struct {
D string
e int
}
var (
s1 = S1{
A: 1,
B: &S2{D: "s"},
C: true,
}

var s1 = S1{
A: 1,
B: &S2{D: "s"},
C: true,
}
s1ParamType = &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "A", Type: int64ParamType},
{Name: "B", Type: &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "D", Type: stringParamType},
},
}},
{Name: "C", Type: boolParamType},
},
}

s1ParamValue = bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"A": sval("1"),
"B": bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"D": sval("s"),
},
},
"C": sval("true"),
},
}

s1ParamReturnValue = map[string]interface{}{
"A": int64(1),
"B": map[string]interface{}{"D": "s"},
"C": true,
}
)

func sval(s string) bq.QueryParameterValue {
return bq.QueryParameterValue{Value: s}

@@ -76,7 +115,7 @@ func TestParamValueScalar(t *testing.T) {
t.Errorf("%v: got %v, want nil", test.val, err)
continue
}
want := sval(test.want)
want := sval(test.wantVal)
if !testutil.Equal(got, want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
}

@@ -113,19 +152,8 @@ func TestParamValueStruct(t *testing.T) {
if err != nil {
t.Fatal(err)
}
want := bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"A": sval("1"),
"B": bq.QueryParameterValue{
StructValues: map[string]bq.QueryParameterValue{
"D": sval("s"),
},
},
"C": sval("true"),
},
}
if !testutil.Equal(got, want) {
t.Errorf("got %+v\nwant %+v", got, want)
if !testutil.Equal(got, s1ParamValue) {
t.Errorf("got %+v\nwant %+v", got, s1ParamValue)
}
}

@@ -141,35 +169,24 @@ func TestParamValueErrors(t *testing.T) {
}

func TestParamType(t *testing.T) {
for _, test := range scalarTests {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.wantType) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
}
}
for _, test := range []struct {
val interface{}
want *bq.QueryParameterType
}{
{0, int64ParamType},
{uint32(32767), int64ParamType},
{3.14, float64ParamType},
{float32(3.14), float64ParamType},
{math.NaN(), float64ParamType},
{true, boolParamType},
{"", stringParamType},
{"string", stringParamType},
{time.Now(), timestampParamType},
{[]byte("foo"), bytesParamType},
{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
{S1{}, &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "A", Type: int64ParamType},
{Name: "B", Type: &bq.QueryParameterType{
Type: "STRUCT",
StructTypes: []*bq.QueryParameterTypeStructTypes{
{Name: "D", Type: stringParamType},
},
}},
{Name: "C", Type: boolParamType},
},
}},
{S1{}, s1ParamType},
} {
got, err := paramType(reflect.TypeOf(test.val))
if err != nil {

@@ -192,17 +209,75 @@ func TestParamTypeErrors(t *testing.T) {
}
}

func TestIntegration_ScalarParam(t *testing.T) {
c := getClient(t)
func TestConvertParamValue(t *testing.T) {
// Scalars.
for _, test := range scalarTests {
got, err := paramRoundTrip(c, test.val)
pval, err := paramValue(reflect.ValueOf(test.val))
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.val, cmp.Comparer(func(t1, t2 time.Time) bool {
return t1.Round(time.Microsecond).Equal(t2.Round(time.Microsecond))
})) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val)
ptype, err := paramType(reflect.TypeOf(test.val))
if err != nil {
t.Fatal(err)
}
got, err := convertParamValue(&pval, ptype)
if err != nil {
t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
}
if !testutil.Equal(got, test.val) {
t.Errorf("%#v: got %#v", test.val, got)
}
}
// Arrays.
for _, test := range []struct {
pval *bq.QueryParameterValue
want []interface{}
}{
{
&bq.QueryParameterValue{},
nil,
},
{
&bq.QueryParameterValue{
ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
},
[]interface{}{int64(1), int64(2)},
},
} {
ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
got, err := convertParamValue(test.pval, ptype)
if err != nil {
t.Fatalf("%+v: %v", test.pval, err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
}
}
// Structs.
got, err := convertParamValue(&s1ParamValue, s1ParamType)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, s1ParamReturnValue) {
t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
}
}

func TestIntegration_ScalarParam(t *testing.T) {
timeEqualMicrosec := cmp.Comparer(func(t1, t2 time.Time) bool {
return t1.Round(time.Microsecond).Equal(t2.Round(time.Microsecond))
})
c := getClient(t)
for _, test := range scalarTests {
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(gotData, test.val, timeEqualMicrosec) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
}
if !testutil.Equal(gotParam, test.val, timeEqualMicrosec) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
}
}
}

@@ -210,40 +285,78 @@ func TestIntegration_ScalarParam(t *testing.T) {
func TestIntegration_OtherParam(t *testing.T) {
c := getClient(t)
for _, test := range []struct {
val interface{}
want interface{}
val interface{}
wantData interface{}
wantParam interface{}
}{
{[]int(nil), []Value(nil)},
{[]int{}, []Value(nil)},
{[]int{1, 2}, []Value{int64(1), int64(2)}},
{[3]int{1, 2, 3}, []Value{int64(1), int64(2), int64(3)}},
{S1{}, []Value{int64(0), nil, false}},
{s1, []Value{int64(1), []Value{"s"}, true}},
{[]int(nil), []Value(nil), []interface{}(nil)},
{[]int{}, []Value(nil), []interface{}(nil)},
{
[]int{1, 2},
[]Value{int64(1), int64(2)},
[]interface{}{int64(1), int64(2)},
},
{
[3]int{1, 2, 3},
[]Value{int64(1), int64(2), int64(3)},
[]interface{}{int64(1), int64(2), int64(3)},
},
{
S1{},
[]Value{int64(0), nil, false},
map[string]interface{}{
"A": int64(0),
"B": nil,
"C": false,
},
},
{
s1,
[]Value{int64(1), []Value{"s"}, true},
s1ParamReturnValue,
},
} {
got, err := paramRoundTrip(c, test.val)
gotData, gotParam, err := paramRoundTrip(c, test.val)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want)
if !testutil.Equal(gotData, test.wantData) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotData, gotData, test.wantData, test.wantData)
}
if !testutil.Equal(gotParam, test.wantParam) {
t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
test.val, gotParam, gotParam, test.wantParam, test.wantParam)
}
}
}

func paramRoundTrip(c *Client, x interface{}) (Value, error) {
// paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
ctx := context.Background()
q := c.Query("select ?")
q.Parameters = []QueryParameter{{Value: x}}
it, err := q.Read(context.Background())
job, err := q.Run(ctx)
if err != nil {
return nil, err
return nil, nil, err
}
it, err := job.Read(ctx)
if err != nil {
return nil, nil, err
}
var val []Value
err = it.Next(&val)
if err != nil {
return nil, err
return nil, nil, err
}
if len(val) != 1 {
return nil, errors.New("wrong number of values")
return nil, nil, errors.New("wrong number of values")
}
return val[0], nil
conf, err := job.Config()
if err != nil {
return nil, nil, err
}
return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
}

220
vendor/cloud.google.com/go/bigquery/query.go
generated
vendored

@@ -23,12 +23,6 @@ import (

// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string

// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool

// Dst is the table into which the results of the query will be written.
// If this field is nil, a temporary table will be created.
Dst *Table

@@ -43,6 +37,9 @@ type QueryConfig struct {

// TableDefinitions describes data sources outside of BigQuery.
// The map keys may be used as table names in the query string.
//
// When a QueryConfig is returned from Job.Config, the map values
// are always of type *ExternalDataConfig.
TableDefinitions map[string]ExternalData

// CreateDisposition specifies the circumstances under which the destination table will be created.

@@ -90,6 +87,7 @@ type QueryConfig struct {
MaxBytesBilled int64

// UseStandardSQL causes the query to use standard SQL. The default.
// Deprecated: use UseLegacySQL.
UseStandardSQL bool

// UseLegacySQL causes the query to use legacy SQL.

@@ -101,6 +99,130 @@ type QueryConfig struct {
// If the query uses named syntax ("@p"), then all parameters must have names.
// It is illegal to mix positional and named syntax.
Parameters []QueryParameter

// The labels associated with this job.
Labels map[string]string

// If true, don't actually run this job. A valid query will return a mostly
// empty response with some processing statistics, while an invalid query will
// return the same error it would if it wasn't a dry run.
//
// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
// call LastStatus on the returned job to get statistics. Calling Status on a
// dry-run job will fail.
DryRun bool
}
|
||||
|
||||
func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
|
||||
qconf := &bq.JobConfigurationQuery{
|
||||
Query: qc.Q,
|
||||
CreateDisposition: string(qc.CreateDisposition),
|
||||
WriteDisposition: string(qc.WriteDisposition),
|
||||
AllowLargeResults: qc.AllowLargeResults,
|
||||
Priority: string(qc.Priority),
|
||||
MaximumBytesBilled: qc.MaxBytesBilled,
|
||||
}
|
||||
if len(qc.TableDefinitions) > 0 {
|
||||
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
|
||||
}
|
||||
for name, data := range qc.TableDefinitions {
|
||||
qconf.TableDefinitions[name] = data.toBQ()
|
||||
}
|
||||
if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
|
||||
qconf.DefaultDataset = &bq.DatasetReference{
|
||||
DatasetId: qc.DefaultDatasetID,
|
||||
ProjectId: qc.DefaultProjectID,
|
||||
}
|
||||
}
|
||||
if tier := int64(qc.MaxBillingTier); tier > 0 {
|
||||
qconf.MaximumBillingTier = &tier
|
||||
}
|
||||
f := false
|
||||
if qc.DisableQueryCache {
|
||||
qconf.UseQueryCache = &f
|
||||
}
|
||||
if qc.DisableFlattenedResults {
|
||||
qconf.FlattenResults = &f
|
||||
// DisableFlattenResults implies AllowLargeResults.
|
||||
qconf.AllowLargeResults = true
|
||||
}
|
||||
if qc.UseStandardSQL && qc.UseLegacySQL {
|
||||
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
|
||||
}
|
||||
if len(qc.Parameters) > 0 && qc.UseLegacySQL {
|
||||
return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
|
||||
}
|
||||
if qc.UseLegacySQL {
|
||||
qconf.UseLegacySql = true
|
||||
} else {
|
||||
qconf.UseLegacySql = false
|
||||
qconf.ForceSendFields = append(qconf.ForceSendFields, "UseLegacySql")
|
||||
}
|
||||
if qc.Dst != nil && !qc.Dst.implicitTable() {
|
||||
qconf.DestinationTable = qc.Dst.toBQ()
|
||||
}
|
||||
for _, p := range qc.Parameters {
|
||||
qp, err := p.toBQ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qconf.QueryParameters = append(qconf.QueryParameters, qp)
|
||||
}
|
||||
return &bq.JobConfiguration{
|
||||
Labels: qc.Labels,
|
||||
DryRun: qc.DryRun,
|
||||
Query: qconf,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
|
||||
qq := q.Query
|
||||
qc := &QueryConfig{
|
||||
Labels: q.Labels,
|
||||
DryRun: q.DryRun,
|
||||
Q: qq.Query,
|
||||
CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
|
||||
WriteDisposition: TableWriteDisposition(qq.WriteDisposition),
|
||||
AllowLargeResults: qq.AllowLargeResults,
|
||||
Priority: QueryPriority(qq.Priority),
|
||||
MaxBytesBilled: qq.MaximumBytesBilled,
|
||||
UseLegacySQL: qq.UseLegacySql,
|
||||
UseStandardSQL: !qq.UseLegacySql,
|
||||
}
|
||||
if len(qq.TableDefinitions) > 0 {
|
||||
qc.TableDefinitions = make(map[string]ExternalData)
|
||||
}
|
||||
for name, qedc := range qq.TableDefinitions {
|
||||
edc, err := bqToExternalDataConfig(&qedc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qc.TableDefinitions[name] = edc
|
||||
}
|
||||
if qq.DefaultDataset != nil {
|
||||
qc.DefaultProjectID = qq.DefaultDataset.ProjectId
|
||||
qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
|
||||
}
|
||||
if qq.MaximumBillingTier != nil {
|
||||
qc.MaxBillingTier = int(*qq.MaximumBillingTier)
|
||||
}
|
||||
if qq.UseQueryCache != nil && !*qq.UseQueryCache {
|
||||
qc.DisableQueryCache = true
|
||||
}
|
||||
if qq.FlattenResults != nil && !*qq.FlattenResults {
|
||||
qc.DisableFlattenedResults = true
|
||||
}
|
||||
if qq.DestinationTable != nil {
|
||||
qc.Dst = bqToTable(qq.DestinationTable, c)
|
||||
}
|
||||
for _, qp := range qq.QueryParameters {
|
||||
p, err := bqToQueryParameter(qp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qc.Parameters = append(qc.Parameters, p)
|
||||
}
|
||||
return qc, nil
|
||||
}
|
||||
|
||||
// QueryPriority specifies a priority with which a query is to be executed.
|
||||
|
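The DryRun comment above prescribes a specific workflow: start the job with Query.Run, then read statistics from the cached status. A minimal caller-side sketch of that workflow, assuming the Job.LastStatus accessor the comment refers to (placeholder query, not part of this diff):

package example

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func dryRunBytes(ctx context.Context, client *bigquery.Client) {
	q := client.Query("select name from dataset.table") // placeholder query
	q.DryRun = true
	job, err := q.Run(ctx) // dry runs must go through Run, not Read
	if err != nil {
		log.Fatal(err)
	}
	// Job.Status would issue an RPC and fail for a dry run; LastStatus
	// returns the status cached when the job was created.
	st := job.LastStatus()
	fmt.Printf("query would process %d bytes\n", st.Statistics.TotalBytesProcessed)
}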
@@ -113,8 +235,9 @@ const (
// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
-	client *Client
+	JobIDConfig
	QueryConfig
+	client *Client
}

// Query creates a query with string q.
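Because JobIDConfig is embedded, its fields are promoted onto Query, so existing caller code keeps compiling unchanged. A small sketch of that (it assumes the JobIDConfig type introduced elsewhere in this commit, with the JobID and AddJobIDSuffix fields moved from QueryConfig):

package example

import "cloud.google.com/go/bigquery"

func configureJobID(q *bigquery.Query) {
	q.JobID = "my-job"      // promoted from the embedded JobIDConfig
	q.AddJobIDSuffix = true // ask the library to append a random suffix
}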
@@ -128,83 +251,26 @@ func (c *Client) Query(q string) *Query {
// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
-	job := &bq.Job{
-		JobReference: createJobRef(q.JobID, q.AddJobIDSuffix, q.client.projectID),
-		Configuration: &bq.JobConfiguration{
-			Query: &bq.JobConfigurationQuery{},
-		},
-	}
-	if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
-		return nil, err
-	}
-	j, err := q.client.insertJob(ctx, &insertJobConf{job: job})
+	job, err := q.newJob()
	if err != nil {
		return nil, err
	}
+	j, err := q.client.insertJob(ctx, job, nil)
+	if err != nil {
+		return nil, err
+	}
	j.isQuery = true
	return j, nil
}

-func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error {
-	conf.Query = q.Q
-
-	if len(q.TableDefinitions) > 0 {
-		conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
-	}
-	for name, data := range q.TableDefinitions {
-		conf.TableDefinitions[name] = data.externalDataConfig()
-	}
-
-	if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
-		conf.DefaultDataset = &bq.DatasetReference{
-			DatasetId: q.DefaultDatasetID,
-			ProjectId: q.DefaultProjectID,
-		}
-	}
-
-	if tier := int64(q.MaxBillingTier); tier > 0 {
-		conf.MaximumBillingTier = &tier
-	}
-	conf.CreateDisposition = string(q.CreateDisposition)
-	conf.WriteDisposition = string(q.WriteDisposition)
-	conf.AllowLargeResults = q.AllowLargeResults
-	conf.Priority = string(q.Priority)
-
-	f := false
-	if q.DisableQueryCache {
-		conf.UseQueryCache = &f
-	}
-	if q.DisableFlattenedResults {
-		conf.FlattenResults = &f
-		// DisableFlattenResults implies AllowLargeResults.
-		conf.AllowLargeResults = true
-	}
-	if q.MaxBytesBilled >= 1 {
-		conf.MaximumBytesBilled = q.MaxBytesBilled
-	}
-	if q.UseStandardSQL && q.UseLegacySQL {
-		return errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
-	}
-	if len(q.Parameters) > 0 && q.UseLegacySQL {
-		return errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
-	}
-	if q.UseLegacySQL {
-		conf.UseLegacySql = true
-	} else {
-		conf.UseLegacySql = false
-		conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
-	}
-	if q.Dst != nil && !q.Dst.implicitTable() {
-		conf.DestinationTable = q.Dst.tableRefProto()
-	}
-	for _, p := range q.Parameters {
-		qp, err := p.toRaw()
-		if err != nil {
-			return err
-		}
-		conf.QueryParameters = append(conf.QueryParameters, qp)
-	}
-	return nil
+func (q *Query) newJob() (*bq.Job, error) {
+	config, err := q.QueryConfig.toBQ()
+	if err != nil {
+		return nil, err
+	}
+	return &bq.Job{
+		JobReference:  q.JobIDConfig.createJobRef(q.client.projectID),
+		Configuration: config,
+	}, nil
}

// Read submits a query for execution and returns the results via a RowIterator.
119
vendor/cloud.google.com/go/bigquery/query_test.go
generated
vendored
@@ -17,9 +17,9 @@ package bigquery
import (
	"testing"

+	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp"
	"golang.org/x/net/context"
-	"cloud.google.com/go/internal/testutil"
	bq "google.golang.org/api/bigquery/v2"
)
@@ -46,15 +46,22 @@ func defaultQueryJob() *bq.Job {
	}
}

+var defaultQuery = &QueryConfig{
+	Q:                "query string",
+	DefaultProjectID: "def-project-id",
+	DefaultDatasetID: "def-dataset-id",
+}
+
func TestQuery(t *testing.T) {
-	defer fixRandomJobID("RANDOM")()
+	defer fixRandomID("RANDOM")()
	c := &Client{
		projectID: "client-project-id",
	}
	testCases := []struct {
-		dst  *Table
-		src  *QueryConfig
-		want *bq.Job
+		dst         *Table
+		src         *QueryConfig
+		jobIDConfig JobIDConfig
+		want        *bq.Job
	}{
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
@@ -64,21 +71,22 @@ func TestQuery(t *testing.T) {
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
-				Q: "query string",
+				Q:      "query string",
+				Labels: map[string]string{"a": "b"},
+				DryRun: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
+				j.Configuration.Labels = map[string]string{"a": "b"}
+				j.Configuration.DryRun = true
				j.Configuration.Query.DefaultDataset = nil
				return j
			}(),
		},
		{
-			dst: c.Dataset("dataset-id").Table("table-id"),
-			src: &QueryConfig{
-				Q:              "query string",
-				JobID:          "jobID",
-				AddJobIDSuffix: true,
-			},
+			dst:         c.Dataset("dataset-id").Table("table-id"),
+			jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true},
+			src:         &QueryConfig{Q: "query string"},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DefaultDataset = nil
@@ -244,16 +252,6 @@ func TestQuery(t *testing.T) {
				return j
			}(),
		},
-		{
-			dst: c.Dataset("dataset-id").Table("table-id"),
-			src: &QueryConfig{
-				Q:                "query string",
-				DefaultProjectID: "def-project-id",
-				DefaultDatasetID: "def-dataset-id",
-				MaxBytesBilled:   -1,
-			},
-			want: defaultQueryJob(),
-		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
@@ -281,24 +279,71 @@ func TestQuery(t *testing.T) {
		},
	}
	for i, tc := range testCases {
-		s := &testService{}
-		c.service = s
		query := c.Query("")
+		query.JobIDConfig = tc.jobIDConfig
		query.QueryConfig = *tc.src
		query.Dst = tc.dst
-		if _, err := query.Run(context.Background()); err != nil {
+		got, err := query.newJob()
+		if err != nil {
			t.Errorf("#%d: err calling query: %v", i, err)
			continue
		}
-		checkJob(t, i, s.Job, tc.want)
+		checkJob(t, i, got, tc.want)
+
+		// Round-trip.
+		jc, err := bqToJobConfig(got.Configuration, c)
+		if err != nil {
+			t.Fatalf("#%d: %v", i, err)
+		}
+		wantConfig := query.QueryConfig
+		// We set AllowLargeResults to true when DisableFlattenedResults is true.
+		if wantConfig.DisableFlattenedResults {
+			wantConfig.AllowLargeResults = true
+		}
+		// A QueryConfig with neither UseXXXSQL field set is equivalent
+		// to one where UseStandardSQL = true.
+		if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL {
+			wantConfig.UseStandardSQL = true
+		}
+		// Treat nil and empty tables the same, and ignore the client.
+		tableEqual := func(t1, t2 *Table) bool {
+			if t1 == nil {
+				t1 = &Table{}
+			}
+			if t2 == nil {
+				t2 = &Table{}
+			}
+			return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID
+		}
+		// A table definition that is a GCSReference round-trips as an ExternalDataConfig.
+		// TODO(jba): see if there is a way to express this with a transformer.
+		gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig {
+			q := g.toBQ()
+			e, _ := bqToExternalDataConfig(&q)
+			return e
+		}
+		externalDataEqual := func(e1, e2 ExternalData) bool {
+			if r, ok := e1.(*GCSReference); ok {
+				e1 = gcsRefToEDC(r)
+			}
+			if r, ok := e2.(*GCSReference); ok {
+				e2 = gcsRefToEDC(r)
+			}
+			return cmp.Equal(e1, e2)
+		}
+		diff := testutil.Diff(jc.(*QueryConfig), &wantConfig,
+			cmp.Comparer(tableEqual),
+			cmp.Comparer(externalDataEqual),
+		)
+		if diff != "" {
+			t.Errorf("#%d: (got=-, want=+):\n%s", i, diff)
+		}
	}
}

func TestConfiguringQuery(t *testing.T) {
	s := &testService{}
	c := &Client{
		projectID: "project-id",
		service:   s,
	}

	query := c.Query("q")
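The round-trip comparison above leans on go-cmp's Comparer option to loosen equality. A tiny self-contained illustration of the same pattern, using a toy type rather than anything from this package:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type table struct{ Project, Dataset, ID string }

func main() {
	// Treat a nil *table and an empty one as equal, as the test above does.
	eq := func(a, b *table) bool {
		if a == nil {
			a = &table{}
		}
		if b == nil {
			b = &table{}
		}
		return *a == *b
	}
	fmt.Println(cmp.Equal((*table)(nil), &table{}, cmp.Comparer(eq)))   // true
	fmt.Println(cmp.Equal(&table{ID: "t"}, &table{}, cmp.Comparer(eq))) // false
}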
@@ -326,30 +371,28 @@ func TestConfiguringQuery(t *testing.T) {
		},
	}

-	if _, err := query.Run(context.Background()); err != nil {
-		t.Fatalf("err calling Query.Run: %v", err)
+	got, err := query.newJob()
+	if err != nil {
+		t.Fatalf("err calling Query.newJob: %v", err)
	}
-	if diff := testutil.Diff(s.Job, want); diff != "" {
+	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("querying: -got +want:\n%s", diff)
	}
}

func TestQueryLegacySQL(t *testing.T) {
-	c := &Client{
-		projectID: "project-id",
-		service:   &testService{},
-	}
+	c := &Client{projectID: "project-id"}
	q := c.Query("q")
	q.UseStandardSQL = true
	q.UseLegacySQL = true
-	_, err := q.Run(context.Background())
+	_, err := q.newJob()
	if err == nil {
		t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
	}
	q = c.Query("q")
	q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
	q.UseLegacySQL = true
-	_, err = q.Run(context.Background())
+	_, err = q.newJob()
	if err == nil {
		t.Error("Parameters and UseLegacySQL: got nil, want error")
	}
156
vendor/cloud.google.com/go/bigquery/read_test.go
generated
vendored
@@ -27,69 +27,65 @@ import (
	"google.golang.org/api/iterator"
)

-type readTabledataArgs struct {
-	conf *readTableConf
-	tok  string
+type pageFetcherArgs struct {
+	table      *Table
+	schema     Schema
+	startIndex uint64
+	pageSize   int64
+	pageToken  string
}

-// readServiceStub services read requests by returning data from an in-memory list of values.
-type readServiceStub struct {
+// pageFetcherReadStub services read requests by returning data from an in-memory list of values.
+type pageFetcherReadStub struct {
	// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
	values     [][][]Value       // contains pages / rows / columns.
	pageTokens map[string]string // maps incoming page token to returned page token.

	// arguments are recorded for later inspection.
-	readTabledataCalls []readTabledataArgs
-
-	service
+	calls []pageFetcherArgs
}

-func (s *readServiceStub) readValues(tok string) *readDataResult {
-	result := &readDataResult{
-		pageToken: s.pageTokens[tok],
+func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
+	s.calls = append(s.calls,
+		pageFetcherArgs{t, schema, startIndex, pageSize, pageToken})
+	result := &fetchPageResult{
+		pageToken: s.pageTokens[pageToken],
		rows:      s.values[0],
	}
	s.values = s.values[1:]
-
-	return result
+	return result, nil
}

-func (s *readServiceStub) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
+func waitForQueryStub(context.Context, string) (Schema, error) {
	return nil, nil
}

-func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
-	s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
-	return s.readValues(token), nil
-}
-
func TestRead(t *testing.T) {
	// The data for the service stub to return is populated for each test case in the testCases for loop.
	ctx := context.Background()
-	service := &readServiceStub{}
-	c := &Client{
-		projectID: "project-id",
-		service:   service,
-	}
-
+	c := &Client{projectID: "project-id"}
+	pf := &pageFetcherReadStub{}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		c:         c,
-		isQuery:   true,
-		destinationTable: &bq.TableReference{
-			ProjectId: "project-id",
-			DatasetId: "dataset-id",
-			TableId:   "table-id",
+		config: &bq.JobConfiguration{
+			Query: &bq.JobConfigurationQuery{
+				DestinationTable: &bq.TableReference{
+					ProjectId: "project-id",
+					DatasetId: "dataset-id",
+					TableId:   "table-id",
+				},
+			},
		},
	}

	for _, readFunc := range []func() *RowIterator{
		func() *RowIterator {
-			return c.Dataset("dataset-id").Table("table-id").Read(ctx)
+			return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
		},
		func() *RowIterator {
-			it, err := queryJob.Read(ctx)
+			it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
			if err != nil {
				t.Fatal(err)
			}
@@ -113,8 +109,8 @@ func TestRead(t *testing.T) {
		},
	}
	for _, tc := range testCases {
-		service.values = tc.data
-		service.pageTokens = tc.pageTokens
+		pf.values = tc.data
+		pf.pageTokens = tc.pageTokens
		if got, ok := collectValues(t, readFunc()); ok {
			if !testutil.Equal(got, tc.want) {
				t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
@@ -142,13 +138,11 @@ func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
}

func TestNoMoreValues(t *testing.T) {
-	c := &Client{
-		projectID: "project-id",
-		service: &readServiceStub{
-			values: [][][]Value{{{1, 2}, {11, 12}}},
-		},
+	c := &Client{projectID: "project-id"}
+	pf := &pageFetcherReadStub{
+		values: [][][]Value{{{1, 2}, {11, 12}}},
	}
-	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
+	it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage)
	var vals []Value
	// We expect to retrieve two values and then fail on the next attempt.
	if err := it.Next(&vals); err != nil {
@@ -162,23 +156,16 @@ func TestNoMoreValues(t *testing.T) {
	}
}

-type errorReadService struct {
-	service
-}
-
var errBang = errors.New("bang!")

-func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
+func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
	return nil, errBang
}

func TestReadError(t *testing.T) {
	// test that service read errors are propagated back to the caller.
-	c := &Client{
-		projectID: "project-id",
-		service:   &errorReadService{},
-	}
-	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
+	c := &Client{projectID: "project-id"}
+	it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage)
	var vals []Value
	if err := it.Next(&vals); err != errBang {
		t.Fatalf("Get: got: %v: want: %v", err, errBang)
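The stubs above work because the read path now takes a page-fetching function as a parameter instead of an embedded service interface: a test can hand in a canned fetcher or an always-failing one without declaring a type that satisfies a large interface. A generic, self-contained sketch of that injection pattern (names and types here are illustrative, not this package's API):

package main

import "fmt"

// fetchFunc stands in for the pageFetcher shape used above.
type fetchFunc func(pageToken string) (rows []int, next string, err error)

// readAll consumes pages via an injected fetch function.
func readAll(fetch fetchFunc) ([]int, error) {
	var all []int
	tok := ""
	for {
		rows, next, err := fetch(tok)
		if err != nil {
			return nil, err
		}
		all = append(all, rows...)
		if next == "" {
			return all, nil
		}
		tok = next
	}
}

func main() {
	// A two-page in-memory stub, analogous to pageFetcherReadStub.
	pages := map[string][]int{"": {1, 2}, "t1": {3}}
	next := map[string]string{"": "t1", "t1": ""}
	stub := func(tok string) ([]int, string, error) { return pages[tok], next[tok], nil }
	got, _ := readAll(stub)
	fmt.Println(got) // [1 2 3]
}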
@@ -187,54 +174,47 @@ func TestReadError(t *testing.T) {
func TestReadTabledataOptions(t *testing.T) {
	// test that read options are propagated.
-	s := &readServiceStub{
+	s := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}}},
	}
-	c := &Client{
-		projectID: "project-id",
-		service:   s,
-	}
-	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
+	c := &Client{projectID: "project-id"}
+	tr := c.Dataset("dataset-id").Table("table-id")
+	it := tr.read(context.Background(), s.fetchPage)
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatal(err)
	}
-	want := []readTabledataArgs{{
-		conf: &readTableConf{
-			projectID: "project-id",
-			datasetID: "dataset-id",
-			tableID:   "table-id",
-			paging: pagingConf{
-				recordsPerRequest:    5,
-				setRecordsPerRequest: true,
-			},
-		},
-		tok: "",
+	want := []pageFetcherArgs{{
+		table:     tr,
+		pageSize:  5,
+		pageToken: "",
	}}
-	if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
-		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
+	if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" {
+		t.Errorf("reading (got=-, want=+):\n%s", diff)
	}
}

func TestReadQueryOptions(t *testing.T) {
	// test that read options are propagated.
-	s := &readServiceStub{
+	c := &Client{projectID: "project-id"}
+	pf := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}}},
	}
+	tr := &bq.TableReference{
+		ProjectId: "project-id",
+		DatasetId: "dataset-id",
+		TableId:   "table-id",
+	}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
-		c:         &Client{service: s},
-		isQuery:   true,
-		destinationTable: &bq.TableReference{
-			ProjectId: "project-id",
-			DatasetId: "dataset-id",
-			TableId:   "table-id",
-		},
+		c:         c,
+		config: &bq.JobConfiguration{
+			Query: &bq.JobConfigurationQuery{DestinationTable: tr},
+		},
	}
-	it, err := queryJob.Read(context.Background())
+	it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage)
	if err != nil {
		t.Fatalf("err calling Read: %v", err)
	}

@@ -244,20 +224,12 @@ func TestReadQueryOptions(t *testing.T) {
		t.Fatalf("Next: got: %v: want: nil", err)
	}

-	want := []readTabledataArgs{{
-		conf: &readTableConf{
-			projectID: "project-id",
-			datasetID: "dataset-id",
-			tableID:   "table-id",
-			paging: pagingConf{
-				recordsPerRequest:    5,
-				setRecordsPerRequest: true,
-			},
-		},
-		tok: "",
+	want := []pageFetcherArgs{{
+		table:     bqToTable(tr, c),
+		pageSize:  5,
+		pageToken: "",
	}}
-	if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
-		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
+	if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) {
+		t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want)
	}
}
16
vendor/cloud.google.com/go/bigquery/schema.go
generated
vendored
@@ -49,7 +49,7 @@ type FieldSchema struct {
	Schema Schema
}

-func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
+func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
	tfs := &bq.TableFieldSchema{
		Description: fs.Description,
		Name:        fs.Name,

@@ -63,21 +63,21 @@ func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
	} // else leave as default, which is interpreted as NULLABLE.

	for _, f := range fs.Schema {
-		tfs.Fields = append(tfs.Fields, f.asTableFieldSchema())
+		tfs.Fields = append(tfs.Fields, f.toBQ())
	}

	return tfs
}

-func (s Schema) asTableSchema() *bq.TableSchema {
+func (s Schema) toBQ() *bq.TableSchema {
	var fields []*bq.TableFieldSchema
	for _, f := range s {
-		fields = append(fields, f.asTableFieldSchema())
+		fields = append(fields, f.toBQ())
	}
	return &bq.TableSchema{Fields: fields}
}

-func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
+func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
	fs := &FieldSchema{
		Description: tfs.Description,
		Name:        tfs.Name,

@@ -87,18 +87,18 @@ func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
	}

	for _, f := range tfs.Fields {
-		fs.Schema = append(fs.Schema, convertTableFieldSchema(f))
+		fs.Schema = append(fs.Schema, bqToFieldSchema(f))
	}
	return fs
}

-func convertTableSchema(ts *bq.TableSchema) Schema {
+func bqToSchema(ts *bq.TableSchema) Schema {
	if ts == nil {
		return nil
	}
	var s Schema
	for _, f := range ts.Fields {
-		s = append(s, convertTableFieldSchema(f))
+		s = append(s, bqToFieldSchema(f))
	}
	return s
}
4
vendor/cloud.google.com/go/bigquery/schema_test.go
generated
vendored
@@ -192,12 +192,12 @@ func TestSchemaConversion(t *testing.T) {
	}

	for _, tc := range testCases {
-		bqSchema := tc.schema.asTableSchema()
+		bqSchema := tc.schema.toBQ()
		if !testutil.Equal(bqSchema, tc.bqSchema) {
			t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
				pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
		}
-		schema := convertTableSchema(tc.bqSchema)
+		schema := bqToSchema(tc.bqSchema)
		if !testutil.Equal(schema, tc.schema) {
			t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
		}
940
vendor/cloud.google.com/go/bigquery/service.go
generated
vendored
@@ -1,940 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"cloud.google.com/go/internal"
	"cloud.google.com/go/internal/optional"
	"cloud.google.com/go/internal/version"
	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
)

// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
	// Jobs
	insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error)
	getJob(ctx context.Context, projectId, jobID string) (*Job, error)
	jobCancel(ctx context.Context, projectId, jobID string) error
	jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
	listJobs(ctx context.Context, projectId string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error)

	// Tables
	createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error
	getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
	deleteTable(ctx context.Context, projectID, datasetID, tableID string) error

	// listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
	listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
	patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error)

	// Table data
	readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
	insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error

	// Datasets
	insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error
	deleteDataset(ctx context.Context, datasetID, projectID string) error
	getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)
	patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error)

	// Misc

	// Waits for a query to complete.
	waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error)

	// listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated.
	listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error)
}

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
}

type bigqueryService struct {
	s *bq.Service
}

func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
	s, err := bq.New(client)
	if err != nil {
		return nil, fmt.Errorf("constructing bigquery client: %v", err)
	}
	s.BasePath = endpoint

	return &bigqueryService{s: s}, nil
}

// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
// token is the token of the initial page to start from. Use an empty string to start from the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
	for {
		var err error
		token, err = getPage(token)
		if err != nil {
			return err
		}
		if token == "" {
			return nil
		}
	}
}
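A minimal usage sketch for getPages, with the function copied verbatim from the removed file above and a stubbed paginated source standing in for a real API call:

package main

import "fmt"

func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
	for {
		var err error
		token, err = getPage(token)
		if err != nil {
			return err
		}
		if token == "" {
			return nil
		}
	}
}

func main() {
	// Two pages keyed by token; an empty next token ends the iteration.
	pages := map[string]struct {
		items []string
		next  string
	}{
		"":   {[]string{"a", "b"}, "p2"},
		"p2": {[]string{"c"}, ""},
	}
	var all []string
	err := getPages("", func(tok string) (string, error) {
		p := pages[tok]
		all = append(all, p.items...)
		return p.next, nil
	})
	fmt.Println(all, err) // [a b c] <nil>
}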
type insertJobConf struct {
	job   *bq.Job
	media io.Reader
}

// Calls the Jobs.Insert RPC and returns a Job. Callers must set the returned Job's
// client.
func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx)
	setClientHeader(call.Header())
	if conf.media != nil {
		call.Media(conf.media)
	}
	var res *bq.Job
	var err error
	invoke := func() error {
		res, err = call.Do()
		return err
	}
	// A job with a client-generated ID can be retried; the presence of the
	// ID makes the insert operation idempotent.
	// We don't retry if there is media, because it is an io.Reader. We'd
	// have to read the contents and keep it in memory, and that could be expensive.
	// TODO(jba): Look into retrying if media != nil.
	if conf.job.JobReference != nil && conf.media == nil {
		err = runWithRetry(ctx, invoke)
	} else {
		err = invoke()
	}
	if err != nil {
		return nil, err
	}

	var dt *bq.TableReference
	if qc := res.Configuration.Query; qc != nil {
		dt = qc.DestinationTable
	}
	return &Job{
		projectID:        projectID,
		jobID:            res.JobReference.JobId,
		destinationTable: dt,
	}, nil
}
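The retry comment in insertJob hinges on server-side deduplication by job ID. A toy model of why a client-generated ID makes re-sending the insert safe; this is purely illustrative and is not the BigQuery API:

package main

import (
	"errors"
	"fmt"
)

type server struct{ jobs map[string]bool }

func (s *server) insert(jobID string) error {
	if jobID == "" {
		return errors.New("no ID: a blind retry could create a duplicate job")
	}
	if s.jobs[jobID] {
		return nil // already inserted; the retry is a no-op
	}
	s.jobs[jobID] = true
	return nil
}

func main() {
	s := &server{jobs: map[string]bool{}}
	fmt.Println(s.insert("job-123")) // <nil>
	fmt.Println(s.insert("job-123")) // <nil> — safe to retry with the same ID
}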
type pagingConf struct {
	recordsPerRequest    int64
	setRecordsPerRequest bool

	startIndex uint64
}

type readTableConf struct {
	projectID, datasetID, tableID string
	paging                        pagingConf
	schema                        Schema // lazily initialized when the first page of data is fetched.
}

func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	return s.readTabledata(ctx, conf, token)
}

func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc }

type readDataResult struct {
	pageToken string
	rows      [][]Value
	totalRows uint64
	schema    Schema
}

func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
	// Prepare request to fetch one page of table data.
	req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
	setClientHeader(req.Header())
	if pageToken != "" {
		req.PageToken(pageToken)
	} else {
		req.StartIndex(conf.paging.startIndex)
	}

	if conf.paging.setRecordsPerRequest {
		req.MaxResults(conf.paging.recordsPerRequest)
	}

	// Fetch the table schema in the background, if necessary.
	errc := make(chan error, 1)
	if conf.schema != nil {
		errc <- nil
	} else {
		go func() {
			var t *bq.Table
			err := runWithRetry(ctx, func() (err error) {
				t, err = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
					Fields("schema").
					Context(ctx).
					Do()
				return err
			})
			if err == nil && t.Schema != nil {
				conf.schema = convertTableSchema(t.Schema)
			}
			errc <- err
		}()
	}
	var res *bq.TableDataList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	err = <-errc
	if err != nil {
		return nil, err
	}
	result := &readDataResult{
		pageToken: res.PageToken,
		totalRows: uint64(res.TotalRows),
		schema:    conf.schema,
	}
	result.rows, err = convertRows(res.Rows, conf.schema)
	if err != nil {
		return nil, err
	}
	return result, nil
}

func (s *bigqueryService) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
	// Use GetQueryResults only to wait for completion, not to read results.
	req := s.s.Jobs.GetQueryResults(projectID, jobID).Context(ctx).MaxResults(0)
	setClientHeader(req.Header())
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Multiplier: 2,
		Max:        60 * time.Second,
	}
	var res *bq.GetQueryResultsResponse
	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
		res, err = req.Do()
		if err != nil {
			return !retryableError(err), err
		}
		if !res.JobComplete { // GetQueryResults may return early without error; retry.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return convertTableSchema(res.Schema), nil
}
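waitForQuery polls GetQueryResults with exponential backoff via internal.Retry and gax.Backoff. A simplified, self-contained rendering of the same loop; unlike the real helper it has no jitter and no context cancellation, so treat it as a sketch of the idea only:

package main

import (
	"fmt"
	"time"
)

// waitComplete retries until isComplete reports done, doubling the delay
// up to a cap, mirroring gax.Backoff{Initial: 1s, Multiplier: 2, Max: 60s}.
func waitComplete(isComplete func() (bool, error)) error {
	delay := 1 * time.Second
	for {
		done, err := isComplete()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(delay)
		if delay *= 2; delay > 60*time.Second {
			delay = 60 * time.Second
		}
	}
}

func main() {
	n := 0
	_ = waitComplete(func() (bool, error) { n++; return n >= 2, nil })
	fmt.Println("polled", n, "times")
}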
type insertRowsConf struct {
	templateSuffix      string
	ignoreUnknownValues bool
	skipInvalidRows     bool
}

func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	req := &bq.TableDataInsertAllRequest{
		TemplateSuffix:      conf.templateSuffix,
		IgnoreUnknownValues: conf.ignoreUnknownValues,
		SkipInvalidRows:     conf.skipInvalidRows,
	}
	for _, row := range rows {
		m := make(map[string]bq.JsonValue)
		for k, v := range row.Row {
			m[k] = bq.JsonValue(v)
		}
		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
			InsertId: row.InsertID,
			Json:     m,
		})
	}
	call := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
	setClientHeader(call.Header())
	var res *bq.TableDataInsertAllResponse
	err := runWithRetry(ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	if err != nil {
		return err
	}
	if len(res.InsertErrors) == 0 {
		return nil
	}

	var errs PutMultiError
	for _, e := range res.InsertErrors {
		if int(e.Index) > len(rows) {
			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
		}
		rie := RowInsertionError{
			InsertID: rows[e.Index].InsertID,
			RowIndex: int(e.Index),
		}
		for _, errp := range e.Errors {
			rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
		}
		errs = append(errs, rie)
	}
	return errs
}

func (s *bigqueryService) getJob(ctx context.Context, projectID, jobID string) (*Job, error) {
	bqjob, err := s.getJobInternal(ctx, projectID, jobID, "configuration", "jobReference")
	if err != nil {
		return nil, err
	}
	return jobFromProtos(bqjob.JobReference, bqjob.Configuration), nil
}

func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	job, err := s.getJobInternal(ctx, projectID, jobID, "status", "statistics")
	if err != nil {
		return nil, err
	}
	st, err := jobStatusFromProto(job.Status)
	if err != nil {
		return nil, err
	}
	st.Statistics = jobStatisticsFromProto(job.Statistics)
	return st, nil
}

func (s *bigqueryService) getJobInternal(ctx context.Context, projectID, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
	var job *bq.Job
	call := s.s.Jobs.Get(projectID, jobID).Context(ctx)
	if len(fields) > 0 {
		call = call.Fields(fields...)
	}
	setClientHeader(call.Header())
	err := runWithRetry(ctx, func() (err error) {
		job, err = call.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return job, nil
}

func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
	// Jobs.Cancel returns a job entity, but the only relevant piece of
	// data it may contain (the status of the job) is unreliable. From the
	// docs: "This call will return immediately, and the client will need
	// to poll for the job status to see if the cancel completed
	// successfully". So it would be misleading to return a status.
	call := s.s.Jobs.Cancel(projectID, jobID).
		Fields(). // We don't need any of the response data.
		Context(ctx)
	setClientHeader(call.Header())
	return runWithRetry(ctx, func() error {
		_, err := call.Do()
		return err
	})
}

func jobFromProtos(jr *bq.JobReference, config *bq.JobConfiguration) *Job {
	var isQuery bool
	var dest *bq.TableReference
	if config.Query != nil {
		isQuery = true
		dest = config.Query.DestinationTable
	}
	return &Job{
		projectID:        jr.ProjectId,
		jobID:            jr.JobId,
		isQuery:          isQuery,
		destinationTable: dest,
	}
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
	state, ok := stateMap[status.State]
	if !ok {
		return nil, fmt.Errorf("unexpected job state: %v", status.State)
	}

	newStatus := &JobStatus{
		State: state,
		err:   nil,
	}
	if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
		newStatus.err = err
	}

	for _, ep := range status.Errors {
		newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
	}
	return newStatus, nil
}

func jobStatisticsFromProto(s *bq.JobStatistics) *JobStatistics {
	js := &JobStatistics{
		CreationTime:        unixMillisToTime(s.CreationTime),
		StartTime:           unixMillisToTime(s.StartTime),
		EndTime:             unixMillisToTime(s.EndTime),
		TotalBytesProcessed: s.TotalBytesProcessed,
	}
	switch {
	case s.Extract != nil:
		js.Details = &ExtractStatistics{
			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
		}
	case s.Load != nil:
		js.Details = &LoadStatistics{
			InputFileBytes: s.Load.InputFileBytes,
			InputFiles:     s.Load.InputFiles,
			OutputBytes:    s.Load.OutputBytes,
			OutputRows:     s.Load.OutputRows,
		}
	case s.Query != nil:
		var names []string
		for _, qp := range s.Query.UndeclaredQueryParameters {
			names = append(names, qp.Name)
		}
		var tables []*Table
		for _, tr := range s.Query.ReferencedTables {
			tables = append(tables, convertTableReference(tr))
		}
		js.Details = &QueryStatistics{
			BillingTier:                   s.Query.BillingTier,
			CacheHit:                      s.Query.CacheHit,
			StatementType:                 s.Query.StatementType,
			TotalBytesBilled:              s.Query.TotalBytesBilled,
			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
			Schema:                        convertTableSchema(s.Query.Schema),
			ReferencedTables:              tables,
			UndeclaredQueryParameterNames: names,
		}
	}
	return js
}

func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
	var res []*ExplainQueryStage
	for _, s := range stages {
		var steps []*ExplainQueryStep
		for _, p := range s.Steps {
			steps = append(steps, &ExplainQueryStep{
				Kind:     p.Kind,
				Substeps: p.Substeps,
			})
		}
		res = append(res, &ExplainQueryStage{
			ComputeRatioAvg: s.ComputeRatioAvg,
			ComputeRatioMax: s.ComputeRatioMax,
			ID:              s.Id,
			Name:            s.Name,
			ReadRatioAvg:    s.ReadRatioAvg,
			ReadRatioMax:    s.ReadRatioMax,
			RecordsRead:     s.RecordsRead,
			RecordsWritten:  s.RecordsWritten,
			Status:          s.Status,
			Steps:           steps,
			WaitRatioAvg:    s.WaitRatioAvg,
			WaitRatioMax:    s.WaitRatioMax,
			WriteRatioAvg:   s.WriteRatioAvg,
			WriteRatioMax:   s.WriteRatioMax,
		})
	}
	return res
}

// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
	var tables []*Table
	req := s.s.Tables.List(projectID, datasetID).
		PageToken(pageToken).
		Context(ctx)
	setClientHeader(req.Header())
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var res *bq.TableList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Do()
		return err
	})
	if err != nil {
		return nil, "", err
	}
	for _, t := range res.Tables {
		tables = append(tables, convertTableReference(t.TableReference))
	}
	return tables, res.NextPageToken, nil
}

// createTable creates a table in the BigQuery service.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error {
	table, err := bqTableFromMetadata(tm)
	if err != nil {
		return err
	}
	table.TableReference = &bq.TableReference{
		ProjectId: projectID,
		DatasetId: datasetID,
		TableId:   tableID,
	}
	req := s.s.Tables.Insert(projectID, datasetID, table).Context(ctx)
	setClientHeader(req.Header())
	_, err = req.Do()
	return err
}

func bqTableFromMetadata(tm *TableMetadata) (*bq.Table, error) {
	t := &bq.Table{}
	if tm == nil {
		return t, nil
	}
	if tm.Schema != nil && tm.ViewQuery != "" {
		return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
	}
	t.FriendlyName = tm.Name
	t.Description = tm.Description
	if tm.Schema != nil {
		t.Schema = tm.Schema.asTableSchema()
	}
	if tm.ViewQuery != "" {
		if tm.UseStandardSQL && tm.UseLegacySQL {
			return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
		}
		t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
		if tm.UseLegacySQL {
			t.View.UseLegacySql = true
		} else {
			t.View.UseLegacySql = false
			t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
		}
	} else if tm.UseLegacySQL || tm.UseStandardSQL {
		return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
	}
	if tm.TimePartitioning != nil {
		t.TimePartitioning = &bq.TimePartitioning{
			Type:         "DAY",
			ExpirationMs: int64(tm.TimePartitioning.Expiration / time.Millisecond),
		}
	}
	if !tm.ExpirationTime.IsZero() {
		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
	}

	if tm.FullID != "" {
		return nil, errors.New("cannot set FullID on create")
	}
	if tm.Type != "" {
		return nil, errors.New("cannot set Type on create")
	}
	if !tm.CreationTime.IsZero() {
		return nil, errors.New("cannot set CreationTime on create")
	}
	if !tm.LastModifiedTime.IsZero() {
		return nil, errors.New("cannot set LastModifiedTime on create")
	}
	if tm.NumBytes != 0 {
		return nil, errors.New("cannot set NumBytes on create")
	}
	if tm.NumRows != 0 {
		return nil, errors.New("cannot set NumRows on create")
	}
	if tm.StreamingBuffer != nil {
		return nil, errors.New("cannot set StreamingBuffer on create")
	}
	if tm.ETag != "" {
		return nil, errors.New("cannot set ETag on create")
	}
	return t, nil
}

func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
	req := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(req.Header())
	var table *bq.Table
	err := runWithRetry(ctx, func() (err error) {
		table, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}

func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
	req := s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(req.Header())
	return runWithRetry(ctx, func() error { return req.Do() })
}

func bqTableToMetadata(t *bq.Table) *TableMetadata {
	md := &TableMetadata{
		Description:      t.Description,
		Name:             t.FriendlyName,
		Type:             TableType(t.Type),
		FullID:           t.Id,
		NumBytes:         t.NumBytes,
		NumRows:          t.NumRows,
		ExpirationTime:   unixMillisToTime(t.ExpirationTime),
		CreationTime:     unixMillisToTime(t.CreationTime),
		LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
		ETag:             t.Etag,
	}
	if t.Schema != nil {
		md.Schema = convertTableSchema(t.Schema)
	}
	if t.View != nil {
		md.ViewQuery = t.View.Query
		md.UseLegacySQL = t.View.UseLegacySql
	}
	if t.TimePartitioning != nil {
		md.TimePartitioning = &TimePartitioning{
			Expiration: time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond,
		}
	}
	if t.StreamingBuffer != nil {
		md.StreamingBuffer = &StreamingBuffer{
			EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
			EstimatedRows:   t.StreamingBuffer.EstimatedRows,
			OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
		}
	}
	return md
}

func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
	// TODO(jba): access
	return &DatasetMetadata{
		CreationTime:           unixMillisToTime(d.CreationTime),
		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
		Description:            d.Description,
		Name:                   d.FriendlyName,
		FullID:                 d.Id,
		Location:               d.Location,
		Labels:                 d.Labels,
		ETag:                   d.Etag,
	}
}

// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
	if m == 0 {
		return time.Time{}
	}
	return time.Unix(0, m*1e6)
}
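A quick worked example for unixMillisToTime, with the function copied verbatim from the removed file above:

package main

import (
	"fmt"
	"time"
)

func unixMillisToTime(m int64) time.Time {
	if m == 0 {
		return time.Time{}
	}
	return time.Unix(0, m*1e6)
}

func main() {
	// 1500000000000 ms = 1.5e9 s since the epoch = 2017-07-14 02:40:00 UTC.
	fmt.Println(unixMillisToTime(1500000000000).UTC()) // 2017-07-14 02:40:00 +0000 UTC
	fmt.Println(unixMillisToTime(0).IsZero())          // true: zero maps to the zero time
}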
||||
func convertTableReference(tr *bq.TableReference) *Table {
|
||||
return &Table{
|
||||
ProjectID: tr.ProjectId,
|
||||
DatasetID: tr.DatasetId,
|
||||
TableID: tr.TableId,
|
||||
}
|
||||
}
|
||||
|
||||
// patchTableConf contains fields to be patched.
|
||||
type patchTableConf struct {
|
||||
// These fields are omitted from the patch operation if nil.
|
||||
Description *string
|
||||
Name *string
|
||||
Schema Schema
|
||||
ExpirationTime time.Time
|
||||
}
|
||||
|
||||
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error) {
|
||||
t := &bq.Table{}
|
||||
forceSend := func(field string) {
|
||||
t.ForceSendFields = append(t.ForceSendFields, field)
|
||||
}
|
||||
|
||||
if conf.Description != nil {
|
||||
t.Description = *conf.Description
|
||||
forceSend("Description")
|
||||
}
|
||||
if conf.Name != nil {
|
||||
t.FriendlyName = *conf.Name
|
||||
forceSend("FriendlyName")
|
||||
}
|
||||
if conf.Schema != nil {
|
||||
t.Schema = conf.Schema.asTableSchema()
|
||||
forceSend("Schema")
|
||||
}
|
||||
if !conf.ExpirationTime.IsZero() {
|
||||
t.ExpirationTime = conf.ExpirationTime.UnixNano() / 1e6
|
||||
forceSend("ExpirationTime")
|
||||
}
|
||||
call := s.s.Tables.Patch(projectID, datasetID, tableID, t).Context(ctx)
|
||||
setClientHeader(call.Header())
|
||||
if etag != "" {
|
||||
call.Header().Set("If-Match", etag)
|
||||
}
|
||||
var table *bq.Table
|
||||
if err := runWithRetry(ctx, func() (err error) {
|
||||
table, err = call.Do()
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bqTableToMetadata(table), nil
|
||||
}
|
||||
|
||||
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error {
|
||||
// TODO(jba): retry?
|
||||
ds, err := bqDatasetFromMetadata(dm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ds.DatasetReference = &bq.DatasetReference{DatasetId: datasetID}
|
||||
req := s.s.Datasets.Insert(projectID, ds).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
_, err = req.Do()
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *bigqueryService) patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
|
||||
ds := bqDatasetFromUpdateMetadata(dm)
|
||||
call := s.s.Datasets.Patch(projectID, datasetID, ds).Context(ctx)
|
||||
setClientHeader(call.Header())
|
||||
if etag != "" {
|
||||
call.Header().Set("If-Match", etag)
|
||||
}
|
||||
var ds2 *bq.Dataset
|
||||
if err := runWithRetry(ctx, func() (err error) {
|
||||
ds2, err = call.Do()
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bqDatasetToMetadata(ds2), nil
|
||||
}
|
||||
|
||||
func bqDatasetFromMetadata(dm *DatasetMetadata) (*bq.Dataset, error) {
|
||||
ds := &bq.Dataset{}
|
||||
if dm == nil {
|
||||
return ds, nil
|
||||
}
|
||||
ds.FriendlyName = dm.Name
|
||||
ds.Description = dm.Description
|
||||
ds.Location = dm.Location
|
||||
ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
|
||||
ds.Labels = dm.Labels
|
||||
if !dm.CreationTime.IsZero() {
|
||||
return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
|
||||
}
|
||||
if !dm.LastModifiedTime.IsZero() {
|
||||
return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
|
||||
}
|
||||
if dm.FullID != "" {
|
||||
return nil, errors.New("bigquery: Dataset.FullID is not writable")
|
||||
}
|
||||
if dm.ETag != "" {
|
||||
return nil, errors.New("bigquery: Dataset.ETag is not writable")
|
||||
}
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
func bqDatasetFromUpdateMetadata(dm *DatasetMetadataToUpdate) *bq.Dataset {
|
||||
ds := &bq.Dataset{}
|
||||
forceSend := func(field string) {
|
||||
ds.ForceSendFields = append(ds.ForceSendFields, field)
|
||||
}
|
||||
|
||||
if dm.Description != nil {
|
||||
ds.Description = optional.ToString(dm.Description)
|
||||
forceSend("Description")
|
||||
}
|
||||
if dm.Name != nil {
|
||||
ds.FriendlyName = optional.ToString(dm.Name)
|
||||
forceSend("FriendlyName")
|
||||
}
|
||||
if dm.DefaultTableExpiration != nil {
|
||||
dur := optional.ToDuration(dm.DefaultTableExpiration)
|
||||
if dur == 0 {
|
||||
// Send a null to delete the field.
|
||||
ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
|
||||
} else {
|
||||
ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
|
||||
}
|
||||
}
|
||||
if dm.setLabels != nil || dm.deleteLabels != nil {
|
||||
ds.Labels = map[string]string{}
|
||||
for k, v := range dm.setLabels {
|
||||
ds.Labels[k] = v
|
||||
}
|
||||
if len(ds.Labels) == 0 && len(dm.deleteLabels) > 0 {
|
||||
forceSend("Labels")
|
||||
}
|
||||
for l := range dm.deleteLabels {
|
||||
ds.NullFields = append(ds.NullFields, "Labels."+l)
|
||||
}
|
||||
}
|
||||
return ds
|
||||
}
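
A minimal usage sketch of the updater above from the caller's side (the dm value and field contents are illustrative, not part of this diff): a non-nil Description or Name is patched via ForceSendFields, while a DefaultTableExpiration of exactly zero deletes the field by sending an explicit null.

    var dm DatasetMetadataToUpdate
    dm.Description = "nightly batch output"      // patched: "Description" is force-sent
    dm.DefaultTableExpiration = time.Duration(0) // zero: "DefaultTableExpirationMs" is sent as null
    ds := bqDatasetFromUpdateMetadata(&dm)
    _ = ds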
func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
    req := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
    setClientHeader(req.Header())
    return runWithRetry(ctx, func() error { return req.Do() })
}

func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
    req := s.s.Datasets.Get(projectID, datasetID).Context(ctx)
    setClientHeader(req.Header())
    var ds *bq.Dataset
    if err := runWithRetry(ctx, func() (err error) {
        ds, err = req.Do()
        return err
    }); err != nil {
        return nil, err
    }
    return bqDatasetToMetadata(ds), nil
}

func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
    req := s.s.Datasets.List(projectID).
        Context(ctx).
        PageToken(pageToken).
        All(all)
    setClientHeader(req.Header())
    if maxResults > 0 {
        req.MaxResults(int64(maxResults))
    }
    if filter != "" {
        req.Filter(filter)
    }
    var res *bq.DatasetList
    err := runWithRetry(ctx, func() (err error) {
        res, err = req.Do()
        return err
    })
    if err != nil {
        return nil, "", err
    }
    var datasets []*Dataset
    for _, d := range res.Datasets {
        datasets = append(datasets, s.convertListedDataset(d))
    }
    return datasets, res.NextPageToken, nil
}

func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Dataset {
    return &Dataset{
        ProjectID: d.DatasetReference.ProjectId,
        DatasetID: d.DatasetReference.DatasetId,
    }
}

func (s *bigqueryService) listJobs(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error) {
    req := s.s.Jobs.List(projectID).
        Context(ctx).
        PageToken(pageToken).
        Projection("full").
        AllUsers(all)
    if state != "" {
        req.StateFilter(state)
    }
    setClientHeader(req.Header())
    if maxResults > 0 {
        req.MaxResults(int64(maxResults))
    }
    res, err := req.Do()
    if err != nil {
        return nil, "", err
    }
    var jobInfos []JobInfo
    for _, j := range res.Jobs {
        ji, err := s.convertListedJob(j)
        if err != nil {
            return nil, "", err
        }
        jobInfos = append(jobInfos, ji)
    }
    return jobInfos, res.NextPageToken, nil
}

func (s *bigqueryService) convertListedJob(j *bq.JobListJobs) (JobInfo, error) {
    st, err := jobStatusFromProto(j.Status)
    if err != nil {
        return JobInfo{}, err
    }
    st.Statistics = jobStatisticsFromProto(j.Statistics)
    return JobInfo{
        Job:    jobFromProtos(j.JobReference, j.Configuration),
        Status: st,
    }, nil
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
    // These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
    backoff := gax.Backoff{
        Initial:    1 * time.Second,
        Max:        32 * time.Second,
        Multiplier: 2,
    }
    return internal.Retry(ctx, backoff, func() (stop bool, err error) {
        err = call()
        if err == nil {
            return true, nil
        }
        return !retryableError(err), err
    })
}

// This is the correct definition of retryable according to the BigQuery team.
func retryableError(err error) bool {
    e, ok := err.(*googleapi.Error)
    if !ok {
        return false
    }
    var reason string
    if len(e.Errors) > 0 {
        reason = e.Errors[0].Reason
    }
    return reason == "backendError" || reason == "rateLimitExceeded"
}
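
For orientation, a minimal sketch of the calling pattern the wrappers above all share (req and ctx are illustrative names, not part of this diff): the closure re-issues the RPC, and internal.Retry waits 1s, 2s, 4s and so on, capped at 32s, until the call succeeds, fails with a non-retryable error, or the context is done.

    var ds *bq.Dataset
    err := runWithRetry(ctx, func() (err error) {
        // Re-issued only on "backendError" or "rateLimitExceeded";
        // any other error stops the loop immediately.
        ds, err = req.Do()
        return err
    })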

293 vendor/cloud.google.com/go/bigquery/table.go generated vendored
@@ -15,6 +15,7 @@
package bigquery

import (
    "errors"
    "fmt"
    "time"

@@ -59,6 +60,7 @@ type TableMetadata struct {
    // Use Legacy SQL for the view query. The default.
    // At most one of UseLegacySQL and UseStandardSQL can be true.
    // Deprecated: use UseLegacySQL.
    UseStandardSQL bool

    // If non-nil, the table is partitioned by time.

@@ -68,6 +70,12 @@ type TableMetadata struct {
    // indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExpirationTime time.Time

    // User-provided labels.
    Labels map[string]string

    // Information about a table stored outside of BigQuery.
    ExternalDataConfig *ExternalDataConfig

    // All the fields below are read-only.

    FullID string // An opaque ID uniquely identifying the table.

@@ -141,6 +149,25 @@ type TimePartitioning struct {
    Expiration time.Duration
}

func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
    if p == nil {
        return nil
    }
    return &bq.TimePartitioning{
        Type:         "DAY",
        ExpirationMs: int64(p.Expiration / time.Millisecond),
    }
}

func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
    if q == nil {
        return nil
    }
    return &TimePartitioning{
        Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
    }
}

// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
    // A lower-bound estimate of the number of bytes currently in the streaming

@@ -155,7 +182,7 @@ type StreamingBuffer struct {
    OldestEntryTime time.Time
}

func (t *Table) tableRefProto() *bq.TableReference {
func (t *Table) toBQ() *bq.TableReference {
    return &bq.TableReference{
        ProjectId: t.ProjectID,
        DatasetId: t.DatasetID,

@@ -174,60 +201,280 @@ func (t *Table) implicitTable() bool {
}

// Create creates a table in the BigQuery service.
// Pass in a TableMetadata value to configure the dataset.
// Pass in a TableMetadata value to configure the table.
// If tm.View.Query is non-empty, the created table will be of type VIEW.
// Expiration can only be set during table creation.
// After table creation, a view can be modified only if its table was initially created
// with a view.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) error {
    return t.c.service.createTable(ctx, t.ProjectID, t.DatasetID, t.TableID, tm)
    table, err := tm.toBQ()
    if err != nil {
        return err
    }
    table.TableReference = &bq.TableReference{
        ProjectId: t.ProjectID,
        DatasetId: t.DatasetID,
        TableId:   t.TableID,
    }
    req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
    setClientHeader(req.Header())
    _, err = req.Do()
    return err
}

func (tm *TableMetadata) toBQ() (*bq.Table, error) {
    t := &bq.Table{}
    if tm == nil {
        return t, nil
    }
    if tm.Schema != nil && tm.ViewQuery != "" {
        return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
    }
    t.FriendlyName = tm.Name
    t.Description = tm.Description
    t.Labels = tm.Labels
    if tm.Schema != nil {
        t.Schema = tm.Schema.toBQ()
    }
    if tm.ViewQuery != "" {
        if tm.UseStandardSQL && tm.UseLegacySQL {
            return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
        }
        t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
        if tm.UseLegacySQL {
            t.View.UseLegacySql = true
        } else {
            t.View.UseLegacySql = false
            t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
        }
    } else if tm.UseLegacySQL || tm.UseStandardSQL {
        return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
    }
    t.TimePartitioning = tm.TimePartitioning.toBQ()
    if !tm.ExpirationTime.IsZero() {
        t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
    }
    if tm.ExternalDataConfig != nil {
        edc := tm.ExternalDataConfig.toBQ()
        t.ExternalDataConfiguration = &edc
    }
    if tm.FullID != "" {
        return nil, errors.New("cannot set FullID on create")
    }
    if tm.Type != "" {
        return nil, errors.New("cannot set Type on create")
    }
    if !tm.CreationTime.IsZero() {
        return nil, errors.New("cannot set CreationTime on create")
    }
    if !tm.LastModifiedTime.IsZero() {
        return nil, errors.New("cannot set LastModifiedTime on create")
    }
    if tm.NumBytes != 0 {
        return nil, errors.New("cannot set NumBytes on create")
    }
    if tm.NumRows != 0 {
        return nil, errors.New("cannot set NumRows on create")
    }
    if tm.StreamingBuffer != nil {
        return nil, errors.New("cannot set StreamingBuffer on create")
    }
    if tm.ETag != "" {
        return nil, errors.New("cannot set ETag on create")
    }
    return t, nil
}

// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
    return t.c.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID)
    req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
    setClientHeader(req.Header())
    var table *bq.Table
    err := runWithRetry(ctx, func() (err error) {
        table, err = req.Do()
        return err
    })
    if err != nil {
        return nil, err
    }
    return bqToTableMetadata(table)
}

func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
    md := &TableMetadata{
        Description:      t.Description,
        Name:             t.FriendlyName,
        Type:             TableType(t.Type),
        FullID:           t.Id,
        Labels:           t.Labels,
        NumBytes:         t.NumBytes,
        NumRows:          t.NumRows,
        ExpirationTime:   unixMillisToTime(t.ExpirationTime),
        CreationTime:     unixMillisToTime(t.CreationTime),
        LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
        ETag:             t.Etag,
    }
    if t.Schema != nil {
        md.Schema = bqToSchema(t.Schema)
    }
    if t.View != nil {
        md.ViewQuery = t.View.Query
        md.UseLegacySQL = t.View.UseLegacySql
    }
    md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
    if t.StreamingBuffer != nil {
        md.StreamingBuffer = &StreamingBuffer{
            EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
            EstimatedRows:   t.StreamingBuffer.EstimatedRows,
            OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
        }
    }
    if t.ExternalDataConfiguration != nil {
        edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
        if err != nil {
            return nil, err
        }
        md.ExternalDataConfig = edc
    }
    return md, nil
}

// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
    return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
    req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
    setClientHeader(req.Header())
    return req.Do()
}

// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
    return newRowIterator(ctx, t.c.service, &readTableConf{
        projectID: t.ProjectID,
        datasetID: t.DatasetID,
        tableID:   t.TableID,
    })
    return t.read(ctx, fetchPage)
}

func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
    return newRowIterator(ctx, t, pf)
}

// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
    var conf patchTableConf
    bqt := tm.toBQ()
    call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
    setClientHeader(call.Header())
    if etag != "" {
        call.Header().Set("If-Match", etag)
    }
    var res *bq.Table
    if err := runWithRetry(ctx, func() (err error) {
        res, err = call.Do()
        return err
    }); err != nil {
        return nil, err
    }
    return bqToTableMetadata(res)
}

func (tm *TableMetadataToUpdate) toBQ() *bq.Table {
    t := &bq.Table{}
    forceSend := func(field string) {
        t.ForceSendFields = append(t.ForceSendFields, field)
    }

    if tm.Description != nil {
        s := optional.ToString(tm.Description)
        conf.Description = &s
        t.Description = optional.ToString(tm.Description)
        forceSend("Description")
    }
    if tm.Name != nil {
        s := optional.ToString(tm.Name)
        conf.Name = &s
        t.FriendlyName = optional.ToString(tm.Name)
        forceSend("FriendlyName")
    }
    conf.Schema = tm.Schema
    conf.ExpirationTime = tm.ExpirationTime
    return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf, etag)
    if tm.Schema != nil {
        t.Schema = tm.Schema.toBQ()
        forceSend("Schema")
    }
    if !tm.ExpirationTime.IsZero() {
        t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
        forceSend("ExpirationTime")
    }
    if tm.ViewQuery != nil {
        t.View = &bq.ViewDefinition{
            Query:           optional.ToString(tm.ViewQuery),
            ForceSendFields: []string{"Query"},
        }
    }
    if tm.UseLegacySQL != nil {
        if t.View == nil {
            t.View = &bq.ViewDefinition{}
        }
        t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
        t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
    }
    labels, forces, nulls := tm.update()
    t.Labels = labels
    t.ForceSendFields = append(t.ForceSendFields, forces...)
    t.NullFields = append(t.NullFields, nulls...)
    return t
}

// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
    // Description is the user-friendly description of this table.
    // The user-friendly description of this table.
    Description optional.String

    // Name is the user-friendly name for this table.
    // The user-friendly name for this table.
    Name optional.String

    // Schema is the table's schema.
    // The table's schema.
    // When updating a schema, you can add columns but not remove them.
    Schema Schema
    // TODO(jba): support updating the view

    // ExpirationTime is the time when this table expires.
    // The time when this table expires.
    ExpirationTime time.Time

    // The query to use for a view.
    ViewQuery optional.String

    // Use Legacy SQL for the view query.
    UseLegacySQL optional.Bool

    labelUpdater
}

// labelUpdater contains common code for updating labels.
type labelUpdater struct {
    setLabels    map[string]string
    deleteLabels map[string]bool
}

// SetLabel causes a label to be added or modified on a call to Update.
func (u *labelUpdater) SetLabel(name, value string) {
    if u.setLabels == nil {
        u.setLabels = map[string]string{}
    }
    u.setLabels[name] = value
}

// DeleteLabel causes a label to be deleted on a call to Update.
func (u *labelUpdater) DeleteLabel(name string) {
    if u.deleteLabels == nil {
        u.deleteLabels = map[string]bool{}
    }
    u.deleteLabels[name] = true
}

func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
    if u.setLabels == nil && u.deleteLabels == nil {
        return nil, nil, nil
    }
    labels = map[string]string{}
    for k, v := range u.setLabels {
        labels[k] = v
    }
    if len(labels) == 0 && len(u.deleteLabels) > 0 {
        forces = []string{"Labels"}
    }
    for l := range u.deleteLabels {
        nulls = append(nulls, "Labels."+l)
    }
    return labels, forces, nulls
}
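
A short usage sketch for the label helpers above (the table and ctx variables and the label names are illustrative): labels staged with SetLabel end up in Labels with matching ForceSendFields entries, deletions become NullFields, and Update sends nothing label-related when neither method was called.

    var tm TableMetadataToUpdate
    tm.SetLabel("team", "data-eng") // added or overwritten on Update
    tm.DeleteLabel("deprecated")    // sent as a null, removing the label
    md, err := table.Update(ctx, tm, "")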

@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -19,11 +19,10 @@ import (
    "time"

    "cloud.google.com/go/internal/testutil"

    bq "google.golang.org/api/bigquery/v2"
)

func TestBQTableToMetadata(t *testing.T) {
func TestBQToTableMetadata(t *testing.T) {
    aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
    aTimeMillis := aTime.UnixNano() / 1e6
    for _, test := range []struct {

@@ -53,21 +52,27 @@ func TestBQTableToMetadata(t *testing.T) {
                ExpirationMs: 7890,
                Type:         "DAY",
            },
            Type: "EXTERNAL",
            View: &bq.ViewDefinition{Query: "view-query"},
            Type:   "EXTERNAL",
            View:   &bq.ViewDefinition{Query: "view-query"},
            Labels: map[string]string{"a": "b"},
            ExternalDataConfiguration: &bq.ExternalDataConfiguration{
                SourceFormat: "GOOGLE_SHEETS",
            },
        },
        &TableMetadata{
            Description:      "desc",
            Name:             "fname",
            ViewQuery:        "view-query",
            FullID:           "id",
            Type:             ExternalTable,
            ExpirationTime:   aTime.Truncate(time.Millisecond),
            CreationTime:     aTime.Truncate(time.Millisecond),
            LastModifiedTime: aTime.Truncate(time.Millisecond),
            NumBytes:         123,
            NumRows:          7,
            TimePartitioning: &TimePartitioning{Expiration: 7890 * time.Millisecond},
            Description:        "desc",
            Name:               "fname",
            ViewQuery:          "view-query",
            FullID:             "id",
            Type:               ExternalTable,
            Labels:             map[string]string{"a": "b"},
            ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets},
            ExpirationTime:     aTime.Truncate(time.Millisecond),
            CreationTime:       aTime.Truncate(time.Millisecond),
            LastModifiedTime:   aTime.Truncate(time.Millisecond),
            NumBytes:           123,
            NumRows:            7,
            TimePartitioning:   &TimePartitioning{Expiration: 7890 * time.Millisecond},
            StreamingBuffer: &StreamingBuffer{
                EstimatedBytes: 11,
                EstimatedRows:  3,

@@ -77,14 +82,17 @@ func TestBQTableToMetadata(t *testing.T) {
            },
        },
    } {
        got := bqTableToMetadata(test.in)
        got, err := bqToTableMetadata(test.in)
        if err != nil {
            t.Fatal(err)
        }
        if diff := testutil.Diff(got, test.want); diff != "" {
            t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
        }
    }
}

func TestBQTableFromMetadata(t *testing.T) {
func TestTableMetadataToBQ(t *testing.T) {
    aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
    aTimeMillis := aTime.UnixNano() / 1e6
    sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}

@@ -97,10 +105,12 @@ func TestBQTableFromMetadata(t *testing.T) {
        {&TableMetadata{}, &bq.Table{}},
        {
            &TableMetadata{
                Name:           "n",
                Description:    "d",
                Schema:         sc,
                ExpirationTime: aTime,
                Name:               "n",
                Description:        "d",
                Schema:             sc,
                ExpirationTime:     aTime,
                Labels:             map[string]string{"a": "b"},
                ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
            },
            &bq.Table{
                FriendlyName: "n",

@@ -111,6 +121,8 @@ func TestBQTableFromMetadata(t *testing.T) {
                    },
                },
                ExpirationTime:            aTimeMillis,
                Labels:                    map[string]string{"a": "b"},
                ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
            },
        },
        {

@@ -159,7 +171,7 @@ func TestBQTableFromMetadata(t *testing.T) {
            },
        },
    } {
        got, err := bqTableFromMetadata(test.in)
        got, err := test.in.toBQ()
        if err != nil {
            t.Fatalf("%+v: %v", test.in, err)
        }

@@ -183,69 +195,89 @@ func TestBQTableFromMetadata(t *testing.T) {
        {StreamingBuffer: &StreamingBuffer{}},
        {ETag: "x"},
    } {
        _, err := bqTableFromMetadata(in)
        _, err := in.toBQ()
        if err == nil {
            t.Errorf("%+v: got nil, want error", in)
        }
    }
}

func TestBQDatasetFromMetadata(t *testing.T) {
func TestTableMetadataToUpdateToBQ(t *testing.T) {
    aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
    for _, test := range []struct {
        in   *DatasetMetadata
        want *bq.Dataset
        tm   TableMetadataToUpdate
        want *bq.Table
    }{
        {nil, &bq.Dataset{}},
        {&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
        {&DatasetMetadata{
            Name:                   "name",
            Description:            "desc",
            DefaultTableExpiration: time.Hour,
            Location:               "EU",
            Labels:                 map[string]string{"x": "y"},
        }, &bq.Dataset{
            FriendlyName:             "name",
            Description:              "desc",
            DefaultTableExpirationMs: 60 * 60 * 1000,
            Location:                 "EU",
            Labels:                   map[string]string{"x": "y"},
        }},
        {
            tm:   TableMetadataToUpdate{},
            want: &bq.Table{},
        },
        {
            tm: TableMetadataToUpdate{
                Description: "d",
                Name:        "n",
            },
            want: &bq.Table{
                Description:     "d",
                FriendlyName:    "n",
                ForceSendFields: []string{"Description", "FriendlyName"},
            },
        },
        {
            tm: TableMetadataToUpdate{
                Schema:         Schema{fieldSchema("desc", "name", "STRING", false, true)},
                ExpirationTime: aTime,
            },
            want: &bq.Table{
                Schema: &bq.TableSchema{
                    Fields: []*bq.TableFieldSchema{
                        bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
                    },
                },
                ExpirationTime:  aTime.UnixNano() / 1e6,
                ForceSendFields: []string{"Schema", "ExpirationTime"},
            },
        },
        {
            tm: TableMetadataToUpdate{ViewQuery: "q"},
            want: &bq.Table{
                View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}},
            },
        },
        {
            tm: TableMetadataToUpdate{UseLegacySQL: false},
            want: &bq.Table{
                View: &bq.ViewDefinition{
                    UseLegacySql:    false,
                    ForceSendFields: []string{"UseLegacySql"},
                },
            },
        },
        {
            tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true},
            want: &bq.Table{
                View: &bq.ViewDefinition{
                    Query:           "q",
                    UseLegacySql:    true,
                    ForceSendFields: []string{"Query", "UseLegacySql"},
                },
            },
        },
        {
            tm: func() (tm TableMetadataToUpdate) {
                tm.SetLabel("L", "V")
                tm.DeleteLabel("D")
                return tm
            }(),
            want: &bq.Table{
                Labels:     map[string]string{"L": "V"},
                NullFields: []string{"Labels.D"},
            },
        },
    } {
        got, err := bqDatasetFromMetadata(test.in)
        if err != nil {
            t.Fatal(err)
        }
        got := test.tm.toBQ()
        if !testutil.Equal(got, test.want) {
            t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
            t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
        }
    }

    // Check that non-writeable fields are unset.
    _, err := bqDatasetFromMetadata(&DatasetMetadata{FullID: "x"})
    if err == nil {
        t.Error("got nil, want error")
    }
}

func TestBQDatasetFromUpdateMetadata(t *testing.T) {
    dm := DatasetMetadataToUpdate{
        Description:            "desc",
        Name:                   "name",
        DefaultTableExpiration: time.Hour,
    }
    dm.SetLabel("label", "value")
    dm.DeleteLabel("del")

    got := bqDatasetFromUpdateMetadata(&dm)
    want := &bq.Dataset{
        Description:              "desc",
        FriendlyName:             "name",
        DefaultTableExpirationMs: 60 * 60 * 1000,
        Labels:                   map[string]string{"label": "value"},
        ForceSendFields:          []string{"Description", "FriendlyName"},
        NullFields:               []string{"Labels.del"},
    }
    if diff := testutil.Diff(got, want); diff != "" {
        t.Errorf("-got, +want:\n%s", diff)
    }
}

75 vendor/cloud.google.com/go/bigquery/uploader.go generated vendored
@@ -20,6 +20,7 @@ import (
    "reflect"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// An Uploader does streaming inserts into a BigQuery table.

@@ -151,27 +152,67 @@ func toValueSaver(x interface{}) (ValueSaver, bool, error) {
}

func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
    var rows []*insertionRow
    for _, saver := range src {
    req, err := u.newInsertRequest(src)
    if err != nil {
        return err
    }
    call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
    call = call.Context(ctx)
    setClientHeader(call.Header())
    var res *bq.TableDataInsertAllResponse
    err = runWithRetry(ctx, func() (err error) {
        res, err = call.Do()
        return err
    })
    if err != nil {
        return err
    }
    return handleInsertErrors(res.InsertErrors, req.Rows)
}

func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
    req := &bq.TableDataInsertAllRequest{
        TemplateSuffix:      u.TableTemplateSuffix,
        IgnoreUnknownValues: u.IgnoreUnknownValues,
        SkipInvalidRows:     u.SkipInvalidRows,
    }
    for _, saver := range savers {
        row, insertID, err := saver.Save()
        if err != nil {
            return err
            return nil, err
        }
        rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
        if insertID == "" {
            insertID = randomIDFn()
        }
        m := make(map[string]bq.JsonValue)
        for k, v := range row {
            m[k] = bq.JsonValue(v)
        }
        req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
            InsertId: insertID,
            Json:     m,
        })
    }

    return u.t.c.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &insertRowsConf{
        skipInvalidRows:     u.SkipInvalidRows,
        ignoreUnknownValues: u.IgnoreUnknownValues,
        templateSuffix:      u.TableTemplateSuffix,
    })
    return req, nil
}

// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
    // If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
    // this row on a best-effort basis.
    InsertID string
    // The data to be inserted, represented as a map from field name to Value.
    Row map[string]Value
func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
    if len(ierrs) == 0 {
        return nil
    }
    var errs PutMultiError
    for _, e := range ierrs {
        if int(e.Index) > len(rows) {
            return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
        }
        rie := RowInsertionError{
            InsertID: rows[e.Index].InsertId,
            RowIndex: int(e.Index),
        }
        for _, errp := range e.Errors {
            rie.Errors = append(rie.Errors, bqToError(errp))
        }
        errs = append(errs, rie)
    }
    return errs
}
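
A hedged sketch of how a caller might unpack the error that handleInsertErrors builds (u, ctx, and savers are assumed to be in scope): a partial failure surfaces as a PutMultiError holding one RowInsertionError per rejected row.

    if err := u.Put(ctx, savers); err != nil {
        if multi, ok := err.(PutMultiError); ok {
            for _, rowErr := range multi {
                // Each entry carries the row's index, its insert ID, and the
                // per-row errors reported by the service.
                log.Printf("row %d (insert ID %q): %v", rowErr.RowIndex, rowErr.InsertID, rowErr.Errors)
            }
        }
    }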

296 vendor/cloud.google.com/go/bigquery/uploader_test.go generated vendored
@@ -15,228 +15,137 @@
package bigquery

import (
    "errors"
    "strconv"
    "testing"

    "github.com/google/go-cmp/cmp"

    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

type testSaver struct {
    ir  *insertionRow
    err error
    row      map[string]Value
    insertID string
    err      error
}

func (ts testSaver) Save() (map[string]Value, string, error) {
    return ts.ir.Row, ts.ir.InsertID, ts.err
    return ts.row, ts.insertID, ts.err
}

func TestRejectsNonValueSavers(t *testing.T) {
    client := &Client{projectID: "project-id"}
    u := Uploader{t: client.Dataset("dataset-id").Table("table-id")}
    inputs := []interface{}{
        1,
        []int{1, 2},
        []interface{}{
            testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
            1,
        },
        StructSaver{},
    }
    for _, in := range inputs {
        if err := u.Put(context.Background(), in); err == nil {
            t.Errorf("put value: %v; got nil, want error", in)
        }
    }
}
func TestNewInsertRequest(t *testing.T) {
    prev := randomIDFn
    n := 0
    randomIDFn = func() string { n++; return strconv.Itoa(n) }
    defer func() { randomIDFn = prev }()

type insertRowsRecorder struct {
    rowBatches [][]*insertionRow
    service
}

func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
    irr.rowBatches = append(irr.rowBatches, rows)
    return nil
}

func TestInsertsData(t *testing.T) {
    testCases := []struct {
        data [][]*insertionRow
    tests := []struct {
        ul     *Uploader
        savers []ValueSaver
        req    *bq.TableDataInsertAllRequest
    }{
        {
            data: [][]*insertionRow{
                {
                    &insertionRow{"a", map[string]Value{"one": 1}},
            ul:  &Uploader{},
            req: &bq.TableDataInsertAllRequest{},
        },
        {
            ul: &Uploader{},
            savers: []ValueSaver{
                testSaver{row: map[string]Value{"one": 1}},
                testSaver{row: map[string]Value{"two": 2}},
            },
            req: &bq.TableDataInsertAllRequest{
                Rows: []*bq.TableDataInsertAllRequestRows{
                    {InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
                    {InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
                },
            },
        },
        {

            data: [][]*insertionRow{
                {
                    &insertionRow{"a", map[string]Value{"one": 1}},
                    &insertionRow{"b", map[string]Value{"two": 2}},
                },
            ul: &Uploader{
                TableTemplateSuffix: "suffix",
                IgnoreUnknownValues: true,
                SkipInvalidRows:     true,
            },
        },
        {

            data: [][]*insertionRow{
                {
                    &insertionRow{"a", map[string]Value{"one": 1}},
                },
                {
                    &insertionRow{"b", map[string]Value{"two": 2}},
                },
            savers: []ValueSaver{
                testSaver{insertID: "a", row: map[string]Value{"one": 1}},
                testSaver{insertID: "", row: map[string]Value{"two": 2}},
            },
        },
        {

            data: [][]*insertionRow{
                {
                    &insertionRow{"a", map[string]Value{"one": 1}},
                    &insertionRow{"b", map[string]Value{"two": 2}},
                },
                {
                    &insertionRow{"c", map[string]Value{"three": 3}},
                    &insertionRow{"d", map[string]Value{"four": 4}},
            req: &bq.TableDataInsertAllRequest{
                Rows: []*bq.TableDataInsertAllRequestRows{
                    {InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
                    {InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
                },
                TemplateSuffix:      "suffix",
                SkipInvalidRows:     true,
                IgnoreUnknownValues: true,
            },
        },
    }
    for _, tc := range testCases {
        irr := &insertRowsRecorder{}
        client := &Client{
            projectID: "project-id",
            service:   irr,
        }
        u := client.Dataset("dataset-id").Table("table-id").Uploader()
        for _, batch := range tc.data {
            if len(batch) == 0 {
                continue
            }
            var toUpload interface{}
            if len(batch) == 1 {
                toUpload = testSaver{ir: batch[0]}
            } else {
                savers := []testSaver{}
                for _, row := range batch {
                    savers = append(savers, testSaver{ir: row})
                }
                toUpload = savers
            }

            err := u.Put(context.Background(), toUpload)
            if err != nil {
                t.Errorf("expected successful Put of ValueSaver; got: %v", err)
            }
        }
        if got, want := irr.rowBatches, tc.data; !testutil.Equal(got, want) {
            t.Errorf("got: %v, want: %v", got, want)
        }
    }
}

type uploadOptionRecorder struct {
    received *insertRowsConf
    service
}

func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
    u.received = conf
    return nil
}

func TestUploadOptionsPropagate(t *testing.T) {
    // we don't care for the data in this testcase.
    dummyData := testSaver{ir: &insertionRow{}}
    recorder := new(uploadOptionRecorder)
    c := &Client{service: recorder}
    table := &Table{
        ProjectID: "project-id",
        DatasetID: "dataset-id",
        TableID:   "table-id",
        c:         c,
    }

    tests := [...]struct {
        ul   *Uploader
        conf insertRowsConf
    }{
        {
            // test zero options lead to zero value for insertRowsConf
            ul: table.Uploader(),
        },
        {
            ul: func() *Uploader {
                u := table.Uploader()
                u.TableTemplateSuffix = "suffix"
                return u
            }(),
            conf: insertRowsConf{
                templateSuffix: "suffix",
            },
        },
        {
            ul: func() *Uploader {
                u := table.Uploader()
                u.IgnoreUnknownValues = true
                return u
            }(),
            conf: insertRowsConf{
                ignoreUnknownValues: true,
            },
        },
        {
            ul: func() *Uploader {
                u := table.Uploader()
                u.SkipInvalidRows = true
                return u
            }(),
            conf: insertRowsConf{
                skipInvalidRows: true,
            },
        },
        { // multiple upload options combine
            ul: func() *Uploader {
                u := table.Uploader()
                u.TableTemplateSuffix = "suffix"
                u.IgnoreUnknownValues = true
                u.SkipInvalidRows = true
                return u
            }(),
            conf: insertRowsConf{
                templateSuffix:      "suffix",
                skipInvalidRows:     true,
                ignoreUnknownValues: true,
            },
        },
    }

    for i, tc := range tests {
        err := tc.ul.Put(context.Background(), dummyData)
        got, err := tc.ul.newInsertRequest(tc.savers)
        if err != nil {
            t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
            t.Fatal(err)
        }

        if recorder.received == nil {
            t.Fatalf("%d: received no options at all!", i)
        want := tc.req
        if !testutil.Equal(got, want) {
            t.Errorf("%#d: %#v: got %#v, want %#v", i, tc.ul, got, want)
        }
    }
}

        want := tc.conf
        got := *recorder.received
        if got != want {
            t.Errorf("%d: got %#v, want %#v, ul=%#v", i, got, want, tc.ul)
func TestNewInsertRequestErrors(t *testing.T) {
    var u Uploader
    _, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("!")}})
    if err == nil {
        t.Error("got nil, want error")
    }
}

func TestHandleInsertErrors(t *testing.T) {
    rows := []*bq.TableDataInsertAllRequestRows{
        {InsertId: "a"},
        {InsertId: "b"},
    }
    for _, test := range []struct {
        in   []*bq.TableDataInsertAllResponseInsertErrors
        want error
    }{
        {
            in:   nil,
            want: nil,
        },
        {
            in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
            want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
        },
        {
            in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
            want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
        },
        {
            in: []*bq.TableDataInsertAllResponseInsertErrors{
                {Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
                {Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
            },
            want: PutMultiError{
                RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
                RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
            },
        },
    } {
        got := handleInsertErrors(test.in, rows)
        if !testutil.Equal(got, test.want) {
            t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
        }
    }
}

func TestValueSavers(t *testing.T) {
    ts := &testSaver{ir: &insertionRow{}}
    ts := &testSaver{}
    type T struct{ I int }
    schema, err := InferSchema(T{})
    if err != nil {

@@ -279,3 +188,20 @@ func TestValueSavers(t *testing.T) {
    }
}
}

func TestValueSaversErrors(t *testing.T) {
    inputs := []interface{}{
        1,
        []int{1, 2},
        []interface{}{
            testSaver{row: map[string]Value{"one": 1}, insertID: "a"},
            1,
        },
        StructSaver{},
    }
    for _, in := range inputs {
        if _, err := valueSavers(in); err == nil {
            t.Errorf("%#v: got nil, want error", in)
        }
    }
}

81 vendor/cloud.google.com/go/bigquery/value.go generated vendored
@@ -243,7 +243,7 @@ func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, err
}

// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is sftype. It returns nil if stype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.

@@ -405,7 +405,7 @@ func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
    m := make(map[string]Value)
    for i, fieldSchema := range schema {
        if fieldSchema.Type != RecordFieldType {
            m[fieldSchema.Name] = vs[i]
            m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
            continue
        }
        // Nested record, possibly repeated.

@@ -510,14 +510,9 @@ func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (i
            schemaField.Name, vfield.Type())
    }

    // A non-nested field can be represented by its Go value.
    // A non-nested field can be represented by its Go value, except for civil times.
    if schemaField.Type != RecordFieldType {
        if !schemaField.Repeated || vfield.Len() > 0 {
            return vfield.Interface(), nil
        }
        // The service treats a null repeated field as an error. Return
        // nil to omit the field entirely.
        return nil, nil
        return toUploadValueReflect(vfield, schemaField), nil
    }
    // A non-repeated nested field is converted into a map[string]Value.
    if !schemaField.Repeated {

@@ -545,6 +540,73 @@ func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (i
    return vals, nil
}

func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
    if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType {
        return toUploadValueReflect(reflect.ValueOf(val), fs)
    }
    return val
}

func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
    switch fs.Type {
    case TimeFieldType:
        return civilToUploadValue(v, fs, func(v reflect.Value) string {
            return CivilTimeString(v.Interface().(civil.Time))
        })
    case DateTimeFieldType:
        return civilToUploadValue(v, fs, func(v reflect.Value) string {
            return CivilDateTimeString(v.Interface().(civil.DateTime))
        })
    default:
        if !fs.Repeated || v.Len() > 0 {
            return v.Interface()
        }
        // The service treats a null repeated field as an error. Return
        // nil to omit the field entirely.
        return nil
    }
}

func civilToUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} {
    if !fs.Repeated {
        return cvt(v)
    }
    if v.Len() == 0 {
        return nil
    }
    s := make([]string, v.Len())
    for i := 0; i < v.Len(); i++ {
        s[i] = cvt(v.Index(i))
    }
    return s
}

// CivilTimeString returns a string representing a civil.Time in a format compatible
// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a
// string with six digits of sub-second precision.
//
// Use CivilTimeString when using civil.Time in DML, for example in INSERT
// statements.
func CivilTimeString(t civil.Time) string {
    if t.Nanosecond == 0 {
        return t.String()
    } else {
        micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
        t.Nanosecond = 0
        return t.String() + fmt.Sprintf(".%06d", micro)
    }
}

// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
// with BigQuery SQL. It separates the date and time with a space, and formats the time
// with CivilTimeString.
//
// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT
// statements.
func CivilDateTimeString(dt civil.DateTime) string {
    return dt.Date.String() + " " + CivilTimeString(dt.Time)
}
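
To make the rounding behavior concrete, a small illustration (the values are chosen only for this example):

    t := civil.Time{Hour: 1, Minute: 2, Second: 3, Nanosecond: 4500}
    fmt.Println(CivilTimeString(t)) // "01:02:03.000005": 4500ns rounds up to 5 microseconds
    dt := civil.DateTime{Date: civil.Date{Year: 2017, Month: time.November, Day: 14}, Time: t}
    fmt.Println(CivilDateTimeString(dt)) // "2017-11-14 01:02:03.000005"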

// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.

@@ -618,7 +680,6 @@ func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, erro
    for i, cell := range record {
        // each cell contains a single entry, keyed by "v"
        val := cell.(map[string]interface{})["v"]

        fs := schema[i]
        v, err := convertValue(val, fs.Type, fs.Schema)
        if err != nil {

246 vendor/cloud.google.com/go/bigquery/value_test.go generated vendored
@@ -58,22 +58,31 @@ func TestConvertBasicValues(t *testing.T) {
}

func TestConvertTime(t *testing.T) {
    // TODO(jba): add tests for civil time types.
    schema := []*FieldSchema{
        {Type: TimestampFieldType},
        {Type: DateFieldType},
        {Type: TimeFieldType},
        {Type: DateTimeFieldType},
    }
    thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC)
    ts := testTimestamp.Round(time.Millisecond)
    row := &bq.TableRow{
        F: []*bq.TableCell{
            {V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)},
            {V: fmt.Sprintf("%.10f", float64(ts.UnixNano())/1e9)},
            {V: testDate.String()},
            {V: testTime.String()},
            {V: testDateTime.String()},
        },
    }
    got, err := convertRow(row, schema)
    if err != nil {
        t.Fatalf("error converting: %v", err)
    }
    if !got[0].(time.Time).Equal(thyme) {
        t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme)
    want := []Value{ts, testDate, testTime, testDateTime}
    for i, g := range got {
        w := want[i]
        if !testutil.Equal(g, w) {
            t.Errorf("#%d: got:\n%v\nwant:\n%v", i, g, w)
        }
    }
    if got[0].(time.Time).Location() != time.UTC {
        t.Errorf("expected time zone UTC: got:\n%v", got)

@@ -337,24 +346,58 @@ func TestRepeatedRecordContainingRecord(t *testing.T) {
    }
}

func TestConvertRowErrors(t *testing.T) {
    // mismatched lengths
    if _, err := convertRow(&bq.TableRow{F: []*bq.TableCell{{V: ""}}}, Schema{}); err == nil {
        t.Error("got nil, want error")
    }
    v3 := map[string]interface{}{"v": 3}
    for _, test := range []struct {
        value interface{}
        fs    FieldSchema
    }{
        {3, FieldSchema{Type: IntegerFieldType}}, // not a string
        {[]interface{}{v3}, // not a string, repeated
            FieldSchema{Type: IntegerFieldType, Repeated: true}},
        {map[string]interface{}{"f": []interface{}{v3}}, // not a string, nested
            FieldSchema{Type: RecordFieldType, Schema: Schema{{Type: IntegerFieldType}}}},
        {map[string]interface{}{"f": []interface{}{v3}}, // wrong length, nested
            FieldSchema{Type: RecordFieldType, Schema: Schema{}}},
    } {
        _, err := convertRow(
            &bq.TableRow{F: []*bq.TableCell{{V: test.value}}},
            Schema{&test.fs})
        if err == nil {
            t.Errorf("value %v, fs %v: got nil, want error", test.value, test.fs)
        }
    }

    // bad field type
    if _, err := convertBasicType("", FieldType("BAD")); err == nil {
        t.Error("got nil, want error")
    }
}

func TestValuesSaverConvertsToMap(t *testing.T) {
    testCases := []struct {
        vs   ValuesSaver
        want *insertionRow
        vs           ValuesSaver
        wantInsertID string
        wantRow      map[string]Value
    }{
        {
            vs: ValuesSaver{
                Schema: []*FieldSchema{
                    {Name: "intField", Type: IntegerFieldType},
                    {Name: "strField", Type: StringFieldType},
                    {Name: "dtField", Type: DateTimeFieldType},
                },
                InsertID: "iid",
                Row:      []Value{1, "a"},
            },
            want: &insertionRow{
                InsertID: "iid",
                Row:      map[string]Value{"intField": 1, "strField": "a"},
                Row: []Value{1, "a",
                    civil.DateTime{civil.Date{1, 2, 3}, civil.Time{4, 5, 6, 7000}}},
            },
            wantInsertID: "iid",
            wantRow: map[string]Value{"intField": 1, "strField": "a",
                "dtField": "0001-02-03 04:05:06.000007"},
        },
        {
            vs: ValuesSaver{

@@ -371,13 +414,11 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
                InsertID: "iid",
                Row:      []Value{1, []Value{[]Value{2, 3}}},
            },
            want: &insertionRow{
                InsertID: "iid",
                Row: map[string]Value{
                    "intField": 1,
                    "recordField": map[string]Value{
                        "nestedInt": []Value{2, 3},
                    },
            wantInsertID: "iid",
            wantRow: map[string]Value{
                "intField": 1,
                "recordField": map[string]Value{
                    "nestedInt": []Value{2, 3},
                },
            },
        },

@@ -402,25 +443,59 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
                },
            },
        },
        want: &insertionRow{
            InsertID: "iid",
            Row: map[string]Value{
                "records": []Value{
                    map[string]Value{"x": 1, "y": 2},
                    map[string]Value{"x": 3, "y": 4},
                },
        wantInsertID: "iid",
        wantRow: map[string]Value{
            "records": []Value{
                map[string]Value{"x": 1, "y": 2},
                map[string]Value{"x": 3, "y": 4},
            },
        },
    },
}
for _, tc := range testCases {
    data, insertID, err := tc.vs.Save()
    gotRow, gotInsertID, err := tc.vs.Save()
    if err != nil {
        t.Errorf("Expected successful save; got: %v", err)
        continue
    }
    got := &insertionRow{insertID, data}
    if !testutil.Equal(got, tc.want) {
        t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want)
    if !testutil.Equal(gotRow, tc.wantRow) {
        t.Errorf("%v row:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotRow, tc.wantRow)
    }
    if !testutil.Equal(gotInsertID, tc.wantInsertID) {
        t.Errorf("%v ID:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotInsertID, tc.wantInsertID)
    }
}
}

func TestValuesToMapErrors(t *testing.T) {
    for _, test := range []struct {
        values []Value
        schema Schema
    }{
        { // mismatched length
            []Value{1},
            Schema{},
        },
        { // nested record not a slice
            []Value{1},
            Schema{{Type: RecordFieldType}},
        },
        { // nested record mismatched length
            []Value{[]Value{1}},
            Schema{{Type: RecordFieldType}},
        },
        { // nested repeated record not a slice
            []Value{[]Value{1}},
            Schema{{Type: RecordFieldType, Repeated: true}},
        },
        { // nested repeated record mismatched length
            []Value{[]Value{[]Value{1}}},
            Schema{{Type: RecordFieldType, Repeated: true}},
        },
    } {
        _, err := valuesToMap(test.values, test.schema)
        if err == nil {
            t.Errorf("%v, %v: got nil, want error", test.values, test.schema)
        }
    }
}

@@ -429,6 +504,8 @@ func TestStructSaver(t *testing.T) {
    schema := Schema{
        {Name: "s", Type: StringFieldType},
        {Name: "r", Type: IntegerFieldType, Repeated: true},
        {Name: "t", Type: TimeFieldType},
        {Name: "tr", Type: TimeFieldType, Repeated: true},
        {Name: "nested", Type: RecordFieldType, Schema: Schema{
            {Name: "b", Type: BooleanFieldType},
        }},

@@ -442,6 +519,8 @@ func TestStructSaver(t *testing.T) {
    T struct {
        S       string
        R       []int
        T       civil.Time
        TR      []civil.Time
        Nested  *N
        Rnested []*N
    }

@@ -464,22 +543,27 @@ func TestStructSaver(t *testing.T) {
            t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
        }
    }

    ct1 := civil.Time{1, 2, 3, 4000}
    ct2 := civil.Time{5, 6, 7, 8000}
    in := T{
        S:       "x",
        R:       []int{1, 2},
        T:       ct1,
        TR:      []civil.Time{ct1, ct2},
        Nested:  &N{B: true},
        Rnested: []*N{{true}, {false}},
    }
    want := map[string]Value{
        "s":       "x",
        "r":       []int{1, 2},
        "t":       "01:02:03.000004",
        "tr":      []string{"01:02:03.000004", "05:06:07.000008"},
        "nested":  map[string]Value{"b": true},
        "rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}},
    }
    check("all values", in, want)
    check("all values, ptr", &in, want)
    check("empty struct", T{}, map[string]Value{"s": ""})
    check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00"})

    // Missing and extra fields ignored.
    type T2 struct {

@@ -492,10 +576,39 @@ func TestStructSaver(t *testing.T) {
    check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}},
        map[string]Value{
            "s":       "",
            "t":       "00:00:00",
            "rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
        })
}

func TestStructSaverErrors(t *testing.T) {
    type (
        badField struct {
            I int `bigquery:"@"`
        }
        badR  struct{ R int }
        badRN struct{ R []int }
    )

    for i, test := range []struct {
        struct_ interface{}
        schema  Schema
    }{
        {0, nil},           // not a struct
        {&badField{}, nil}, // bad field name
        {&badR{}, Schema{{Name: "r", Repeated: true}}},        // repeated field has bad type
        {&badR{}, Schema{{Name: "r", Type: RecordFieldType}}}, // nested field has bad type
        {&badRN{[]int{0}}, // nested repeated field has bad type
            Schema{{Name: "r", Type: RecordFieldType, Repeated: true}}},
    } {
        ss := &StructSaver{Struct: test.struct_, Schema: test.schema}
        _, _, err := ss.Save()
        if err == nil {
            t.Errorf("#%d, %v, %v: got nil, want error", i, test.struct_, test.schema)
        }
    }
}

func TestConvertRows(t *testing.T) {
    schema := []*FieldSchema{
        {Type: StringFieldType},

@@ -528,6 +641,12 @@ func TestConvertRows(t *testing.T) {
    if !testutil.Equal(got, want) {
        t.Errorf("\ngot %v\nwant %v", got, want)
    }

    rows[0].F[0].V = 1
    _, err = convertRows(rows, schema)
    if err == nil {
        t.Error("got nil, want error")
    }
}

func TestValueList(t *testing.T) {

@@ -835,6 +954,65 @@ func TestStructLoaderErrors(t *testing.T) {
    type bad2 struct{ I uint } // unsupported integer type
    check(&bad2{})

    type bad3 struct {
        I int `bigquery:"@"`
    } // bad field name
    check(&bad3{})

    type bad4 struct{ Nested int } // non-struct for nested field
    check(&bad4{})

    type bad5 struct{ Nested struct{ NestS int } } // bad nested struct
    check(&bad5{})

    bad6 := &struct{ Nums int }{} // non-slice for repeated field
    sl := structLoader{}
    err := sl.set(bad6, repSchema)
    if err == nil {
        t.Errorf("%T: got nil, want error", bad6)
    }

    // sl.set's error is sticky, with even good input.
    err2 := sl.set(&repStruct{}, repSchema)
    if err2 != err {
        t.Errorf("%v != %v, expected equal", err2, err)
    }
    // sl.Load is similarly sticky
    err2 = sl.Load(nil, nil)
    if err2 != err {
        t.Errorf("%v != %v, expected equal", err2, err)
    }

    // Null values.
    schema := Schema{
        {Name: "i", Type: IntegerFieldType},
        {Name: "f", Type: FloatFieldType},
        {Name: "b", Type: BooleanFieldType},
        {Name: "s", Type: StringFieldType},
        {Name: "by", Type: BytesFieldType},
        {Name: "d", Type: DateFieldType},
    }
    type s struct {
        I  int
        F  float64
        B  bool
        S  string
        By []byte
        D  civil.Date
    }
    vals := []Value{int64(0), 0.0, false, "", []byte{}, testDate}
    if err := load(&s{}, schema, vals); err != nil {
        t.Fatal(err)
    }
    for i, e := range vals {
        vals[i] = nil
        got := load(&s{}, schema, vals)
        if got != errNoNulls {
            t.Errorf("#%d: got %v, want %v", i, got, errNoNulls)
        }
        vals[i] = e
    }

    // Using more than one struct type with the same structLoader.
    type different struct {
        B bool

@@ -845,11 +1023,11 @@ func TestStructLoaderErrors(t *testing.T) {
        Nums []int
    }

    var sl structLoader
    sl = structLoader{}
    if err := sl.set(&testStruct1{}, schema2); err != nil {
        t.Fatal(err)
    }
    err := sl.set(&different{}, schema2)
    err = sl.set(&different{}, schema2)
    if err == nil {
        t.Error("different struct types: got nil, want error")
    }

5 vendor/cloud.google.com/go/bigtable/export_test.go generated vendored
@@ -136,7 +136,7 @@ func (e *EmulatedEnv) Config() IntegrationTestConfig {
func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
    timeout := 20 * time.Second
    ctx, _ := context.WithTimeout(context.Background(), timeout)
    conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
    conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock())
    if err != nil {
        return nil, err
    }

@@ -152,7 +152,8 @@ func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
func (e *EmulatedEnv) NewClient() (*Client, error) {
    timeout := 20 * time.Second
    ctx, _ := context.WithTimeout(context.Background(), timeout)
    conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
    conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock(),
        grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
    if err != nil {
        return nil, err
    }
|
||||
|
|
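Both hunks add grpc.WithBlock, which turns Dial into a synchronous call that only returns once the connection is actually up; against an in-process emulator this surfaces a failed start immediately instead of on the first RPC. A minimal sketch of the same idea, assuming a placeholder address and timeout, and using DialContext so the blocking dial honors a deadline (the vendored test's ctx, _ := form also leaks the cancel func, which this version avoids):

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel() // release the timer promptly
	// WithBlock makes the dial synchronous; with a context deadline it
	// fails fast if the server never comes up.
	conn, err := grpc.DialContext(ctx, "localhost:9000",
		grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}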
4
vendor/cloud.google.com/go/bigtable/retry_test.go
generated
vendored
@@ -36,7 +36,7 @@ func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err
	if err != nil {
		return nil, nil, err
	}
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		return nil, nil, err
	}

@@ -80,10 +80,10 @@ func TestRetryApply(t *testing.T) {
		return handler(ctx, req)
	}
	tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector))
	defer cleanup()
	if err != nil {
		t.Fatalf("fake server setup: %v", err)
	}
	defer cleanup()

	mut := NewMutation()
	mut.Set("cf", "col", 1, []byte("val"))
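The second hunk moves defer cleanup() below the error check. When setup fails, the returned cleanup func is nil, and a defer registered before the check panics at function exit. A small self-contained illustration of the ordering; setup here is a stand-in, not the bigtable helper:

package main

import "fmt"

func setup(fail bool) (cleanup func(), err error) {
	if fail {
		return nil, fmt.Errorf("setup failed")
	}
	return func() { fmt.Println("cleaned up") }, nil
}

func run() error {
	cleanup, err := setup(false)
	if err != nil {
		// cleanup is nil on this path; deferring it before this check
		// would panic with a nil-function call at function exit.
		return err
	}
	defer cleanup()
	fmt.Println("doing work")
	return nil
}

func main() {
	if err := run(); err != nil {
		fmt.Println(err)
	}
}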
11
vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go
generated
vendored
@@ -27,6 +27,7 @@ import (
	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// Controller2CallOptions contains the retry settings for each method of Controller2Client.

@@ -77,7 +78,7 @@ type Controller2Client struct {
	CallOptions *Controller2CallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	Metadata metadata.MD
}

// NewController2Client creates a new controller2 client.

@@ -134,7 +135,7 @@ func (c *Controller2Client) Close() error {
func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// RegisterDebuggee registers the debuggee with the controller service.

@@ -148,7 +149,7 @@ func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
// from data loss, or change the debuggee_id format. Agents must handle
// debuggee_id value changing upon re-registration.
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
	var resp *clouddebuggerpb.RegisterDebuggeeResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
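Every generated method clones its default options with opts = append(c.CallOptions.X[0:len(x):len(x)], opts...). The three-index (full) slice expression pins the capacity to the length, so append is forced to allocate a fresh backing array instead of writing per-call options into the shared defaults. A tiny demonstration of why the third index matters:

package main

import "fmt"

func main() {
	defaults := make([]string, 2, 4)
	defaults[0], defaults[1] = "retry", "timeout"

	// cap == len, so append must copy; the shared defaults stay untouched.
	safe := append(defaults[0:len(defaults):len(defaults)], "user-opt")

	// Without the third index there is spare capacity, and append writes
	// "other-opt" into defaults' backing array, visible to any later reslice.
	unsafe := append(defaults[0:len(defaults)], "other-opt")

	fmt.Println(safe, unsafe, defaults)
}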
@@ -176,7 +177,7 @@ func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebu
// until the controller removes them from the active list to avoid
// setting those breakpoints again.
func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...)
	var resp *clouddebuggerpb.ListActiveBreakpointsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -199,7 +200,7 @@ func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clou
// semantics. These may only make changes such as canonicalizing a value
// or snapping the location to the correct line of code.
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...)
	var resp *clouddebuggerpb.UpdateActiveBreakpointResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
15
vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go
generated
vendored
@@ -27,6 +27,7 @@ import (
	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// Debugger2CallOptions contains the retry settings for each method of Debugger2Client.

@@ -81,7 +82,7 @@ type Debugger2Client struct {
	CallOptions *Debugger2CallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	Metadata metadata.MD
}

// NewDebugger2Client creates a new debugger2 client.

@@ -130,12 +131,12 @@ func (c *Debugger2Client) Close() error {
func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// SetBreakpoint sets the breakpoint to the debuggee.
func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...)
	var resp *clouddebuggerpb.SetBreakpointResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -151,7 +152,7 @@ func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerp

// GetBreakpoint gets breakpoint information.
func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...)
	var resp *clouddebuggerpb.GetBreakpointResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -167,7 +168,7 @@ func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerp

// DeleteBreakpoint deletes the breakpoint from the debuggee.
func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error

@@ -179,7 +180,7 @@ func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebugg

// ListBreakpoints lists all breakpoints for the debuggee.
func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...)
	var resp *clouddebuggerpb.ListBreakpointsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -195,7 +196,7 @@ func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebugge

// ListDebuggees lists all the debuggees that the user has access to.
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)
	var resp *clouddebuggerpb.ListDebuggeesResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
18
vendor/cloud.google.com/go/debugger/apiv2/doc.go
generated
vendored
@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package debugger is an experimental, auto-generated package for the
// Package debugger is an auto-generated package for the
// Stackdriver Debugger API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
//
// Examines the call stack and variables of a running application
// without stopping or slowing it down.
//

@@ -28,11 +30,15 @@ import (
	"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val []string) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx)
	md = md.Copy()
	md["x-goog-api-client"] = val
	return metadata.NewOutgoingContext(ctx, md)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
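The key behavioral difference in this hunk: the old insertXGoog overwrote the x-goog-api-client key, while the new insertMetadata appends values, so metadata already present on the outgoing context is preserved. A small self-contained check of the merge semantics, with the helper copied verbatim from the diff above:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// insertMetadata mirrors the generated helper: copy the outgoing metadata,
// then append every provided MD so existing keys gain values rather than
// being replaced.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

func main() {
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("x-goog-api-client", "gl-go/1.9"))
	ctx = insertMetadata(ctx, metadata.Pairs("x-goog-api-client", "gapic/0.1"))
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-goog-api-client"]) // [gl-go/1.9 gapic/0.1]
}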
126
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go
generated
vendored
@@ -30,10 +30,13 @@ import (
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
	DeidentifyContent      []gax.CallOption
	AnalyzeDataSourceRisk  []gax.CallOption
	InspectContent         []gax.CallOption
	RedactContent          []gax.CallOption
	CreateInspectOperation []gax.CallOption

@@ -65,6 +68,8 @@ func defaultCallOptions() *CallOptions {
		},
	}
	return &CallOptions{
		DeidentifyContent:      retry[[2]string{"default", "idempotent"}],
		AnalyzeDataSourceRisk:  retry[[2]string{"default", "idempotent"}],
		InspectContent:         retry[[2]string{"default", "non_idempotent"}],
		RedactContent:          retry[[2]string{"default", "non_idempotent"}],
		CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
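The retry[[2]string{...}] lookups index a map built earlier in defaultCallOptions; keys pair a settings profile with an idempotency class, so idempotent and non-idempotent methods can share or split retry policies. A hedged sketch of that shape with gax-go; the codes and backoff numbers are illustrative, not the DLP defaults:

package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go"
	"google.golang.org/grpc/codes"
)

// buildRetryTable mirrors the shape of the generated defaultCallOptions:
// one retry policy per {"settings profile", "idempotency class"} key.
func buildRetryTable() map[[2]string][]gax.CallOption {
	return map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				// Retry only transient failures, with exponential backoff.
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60 * time.Second,
					Multiplier: 1.3,
				})
			}),
		},
	}
}

func main() {
	retry := buildRetryTable()
	fmt.Println(len(retry[[2]string{"default", "idempotent"}])) // 1
}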
@@ -91,7 +96,7 @@ type Client struct {
	CallOptions *CallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	Metadata metadata.MD
}

// NewClient creates a new dlp service client.

@@ -145,7 +150,7 @@ func (c *Client) Close() error {
func (c *Client) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ResultPath returns the path for the result resource.

@@ -156,10 +161,46 @@ func ResultPath(result string) string {
		""
}

// DeidentifyContent de-identifies potentially sensitive info from a list of strings.
// This method has limits on input size and output size.
func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
	var resp *dlppb.DeidentifyContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
// Cloud Platform repository.
func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) {
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &AnalyzeDataSourceRiskOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}

// InspectContent finds potentially sensitive info in a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
	var resp *dlppb.InspectContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -176,7 +217,7 @@ func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRe
// RedactContent redacts potentially sensitive info from a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...)
	var resp *dlppb.RedactContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -193,7 +234,7 @@ func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequ
// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
// repository.
func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -211,7 +252,7 @@ func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateIn

// ListInspectFindings returns list of results for given inspect operation result set id.
func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...)
	var resp *dlppb.ListInspectFindingsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -227,7 +268,7 @@ func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspect

// ListInfoTypes returns sensitive information types for given category.
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
	var resp *dlppb.ListInfoTypesResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -243,7 +284,7 @@ func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequ

// ListRootCategories returns the list of root categories of sensitive information.
func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
	var resp *dlppb.ListRootCategoriesResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -257,6 +298,75 @@ func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCate
	return resp, nil
}

// AnalyzeDataSourceRiskOperation manages a long-running operation from AnalyzeDataSourceRisk.
type AnalyzeDataSourceRiskOperation struct {
	lro *longrunning.Operation
}

// AnalyzeDataSourceRiskOperation returns a new AnalyzeDataSourceRiskOperation from a given name.
// The name must be that of a previously created AnalyzeDataSourceRiskOperation, possibly from a different process.
func (c *Client) AnalyzeDataSourceRiskOperation(name string) *AnalyzeDataSourceRiskOperation {
	return &AnalyzeDataSourceRiskOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *AnalyzeDataSourceRiskOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
	var resp dlppb.RiskAnalysisOperationResult
	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *AnalyzeDataSourceRiskOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
	var resp dlppb.RiskAnalysisOperationResult
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	if !op.Done() {
		return nil, nil
	}
	return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *AnalyzeDataSourceRiskOperation) Metadata() (*dlppb.RiskAnalysisOperationMetadata, error) {
	var meta dlppb.RiskAnalysisOperationMetadata
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *AnalyzeDataSourceRiskOperation) Done() bool {
	return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *AnalyzeDataSourceRiskOperation) Name() string {
	return op.lro.Name()
}

// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation.
type CreateInspectOperationHandle struct {
	lro *longrunning.Operation
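The Wait/Poll/Done trio added above is the standard generated wrapper over the google.longrunning service: Wait is a convenience that polls on a fixed interval, while a caller that wants progress metadata can drive Poll itself. A hedged sketch of that manual loop; pollRisk is a hypothetical helper, and the 10-second interval is arbitrary:

package example

import (
	"log"
	"time"

	dlp "cloud.google.com/go/dlp/apiv2beta1"
	"golang.org/x/net/context"
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
)

// pollRisk drives Poll by hand instead of calling Wait, logging progress
// metadata between attempts until the operation completes.
func pollRisk(ctx context.Context, op *dlp.AnalyzeDataSourceRiskOperation) (*dlppb.RiskAnalysisOperationResult, error) {
	for {
		resp, err := op.Poll(ctx)
		if err != nil {
			return nil, err // RPC failure, or the operation finished with an error
		}
		if op.Done() {
			return resp, nil // completed successfully
		}
		if meta, _ := op.Metadata(); meta != nil {
			log.Printf("risk analysis in progress: %v", meta)
		}
		time.Sleep(10 * time.Second)
	}
}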
41
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go
generated
vendored
@@ -32,6 +32,47 @@ func ExampleNewClient() {
	_ = c
}

func ExampleClient_DeidentifyContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.DeidentifyContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeidentifyContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_AnalyzeDataSourceRisk() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.AnalyzeDataSourceRiskRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.AnalyzeDataSourceRisk(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_InspectContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
18
vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go
generated
vendored
@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package dlp is an experimental, auto-generated package for the
// Package dlp is an auto-generated package for the
// DLP API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
//
// The Google Data Loss Prevention API provides methods for detection of
// privacy-sensitive fragments in text, images, and Google Cloud Platform
// storage repositories.

@@ -27,11 +29,15 @@ import (
	"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val []string) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx)
	md = md.Copy()
	md["x-goog-api-client"] = val
	return metadata.NewOutgoingContext(ctx, md)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
332
vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go
generated
vendored
@@ -85,6 +85,18 @@ func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactCont
	return s.resps[0].(*dlppb.RedactContentResponse), nil
}

func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.DeidentifyContentResponse), nil
}

func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {

@@ -97,6 +109,18 @@ func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.C
	return s.resps[0].(*longrunningpb.Operation), nil
}

func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*longrunningpb.Operation, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*longrunningpb.Operation), nil
}

func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
@@ -162,6 +186,156 @@ func TestMain(m *testing.M) {
	os.Exit(m.Run())
}

func TestDlpServiceDeidentifyContent(t *testing.T) {
	var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{}

	mockDlp.err = nil
	mockDlp.reqs = nil

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var request = &dlppb.DeidentifyContentRequest{
		DeidentifyConfig: deidentifyConfig,
		InspectConfig:    inspectConfig,
		Items:            items,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.DeidentifyContent(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q", got, want)
	}
}

func TestDlpServiceDeidentifyContentError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var request = &dlppb.DeidentifyContentRequest{
		DeidentifyConfig: deidentifyConfig,
		InspectConfig:    inspectConfig,
		Items:            items,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.DeidentifyContent(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) {
	var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{}

	mockDlp.err = nil
	mockDlp.reqs = nil

	any, err := ptypes.MarshalAny(expectedResponse)
	if err != nil {
		t.Fatal(err)
	}
	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
		Name:   "longrunning-test",
		Done:   true,
		Result: &longrunningpb.Operation_Response{Response: any},
	})

	var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
	var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
	var request = &dlppb.AnalyzeDataSourceRiskRequest{
		PrivacyMetric: privacyMetric,
		SourceTable:   sourceTable,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := respLRO.Wait(context.Background())

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q", got, want)
	}
}

func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = nil
	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
		Name: "longrunning-test",
		Done: true,
		Result: &longrunningpb.Operation_Error{
			Error: &status.Status{
				Code:    int32(errCode),
				Message: "test error",
			},
		},
	})

	var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
	var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
	var request = &dlppb.AnalyzeDataSourceRiskRequest{
		PrivacyMetric: privacyMetric,
		SourceTable:   sourceTable,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := respLRO.Wait(context.Background())

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
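The LRO tests above fake a completed operation by packing the expected response proto into a google.protobuf.Any and handing it back as the Operation's Result; the error variant swaps in an Operation_Error instead. A self-contained sketch of just the packing step, assuming a stand-in message type in place of the real response proto:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

func main() {
	// Pack any proto message into an Any; a mock server can then return this
	// Operation and the generated Wait will unmarshal the payload back out.
	payload := &durpb.Duration{Seconds: 42} // stand-in for a real response proto
	anyResp, err := ptypes.MarshalAny(payload)
	if err != nil {
		log.Fatal(err)
	}
	op := &longrunningpb.Operation{
		Name:   "longrunning-test",
		Done:   true,
		Result: &longrunningpb.Operation_Response{Response: anyResp},
	}
	fmt.Println(op.Done, op.GetResponse().TypeUrl)
}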
func TestDlpServiceInspectContent(t *testing.T) {
	var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{}

@@ -170,8 +344,23 @@ func TestDlpServiceInspectContent(t *testing.T) {

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var type_ string = "text/plain"
	var value string = "My email is example@example.com."
	var itemsElement = &dlppb.ContentItem{
		Type: type_,
		DataItem: &dlppb.ContentItem_Value{
			Value: value,
		},
	}
	var items = []*dlppb.ContentItem{itemsElement}
	var request = &dlppb.InspectContentRequest{
		InspectConfig: inspectConfig,
		Items:         items,

@@ -201,8 +390,23 @@ func TestDlpServiceInspectContentError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var type_ string = "text/plain"
	var value string = "My email is example@example.com."
	var itemsElement = &dlppb.ContentItem{
		Type: type_,
		DataItem: &dlppb.ContentItem_Value{
			Value: value,
		},
	}
	var items = []*dlppb.ContentItem{itemsElement}
	var request = &dlppb.InspectContentRequest{
		InspectConfig: inspectConfig,
		Items:         items,

@@ -230,9 +434,33 @@ func TestDlpServiceRedactContent(t *testing.T) {

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var type_ string = "text/plain"
	var value string = "My email is example@example.com."
	var itemsElement = &dlppb.ContentItem{
		Type: type_,
		DataItem: &dlppb.ContentItem_Value{
			Value: value,
		},
	}
	var items = []*dlppb.ContentItem{itemsElement}
	var name2 string = "EMAIL_ADDRESS"
	var infoType = &dlppb.InfoType{
		Name: name2,
	}
	var replaceWith string = "REDACTED"
	var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{
		InfoType:    infoType,
		ReplaceWith: replaceWith,
	}
	var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement}
	var request = &dlppb.RedactContentRequest{
		InspectConfig: inspectConfig,
		Items:         items,

@@ -263,9 +491,33 @@ func TestDlpServiceRedactContentError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var type_ string = "text/plain"
	var value string = "My email is example@example.com."
	var itemsElement = &dlppb.ContentItem{
		Type: type_,
		DataItem: &dlppb.ContentItem_Value{
			Value: value,
		},
	}
	var items = []*dlppb.ContentItem{itemsElement}
	var name2 string = "EMAIL_ADDRESS"
	var infoType = &dlppb.InfoType{
		Name: name2,
	}
	var replaceWith string = "REDACTED"
	var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{
		InfoType:    infoType,
		ReplaceWith: replaceWith,
	}
	var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement}
	var request = &dlppb.RedactContentRequest{
		InspectConfig: inspectConfig,
		Items:         items,

@@ -287,9 +539,9 @@ func TestDlpServiceRedactContentError(t *testing.T) {
	_ = resp
}
func TestDlpServiceCreateInspectOperation(t *testing.T) {
	var name string = "name3373707"
	var name2 string = "name2-1052831874"
	var expectedResponse = &dlppb.InspectOperationResult{
		Name: name,
		Name: name2,
	}

	mockDlp.err = nil

@@ -305,8 +557,26 @@ func TestDlpServiceCreateInspectOperation(t *testing.T) {
		Result: &longrunningpb.Operation_Response{Response: any},
	})

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{}
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var url string = "gs://example_bucket/example_file.png"
	var fileSet = &dlppb.CloudStorageOptions_FileSet{
		Url: url,
	}
	var cloudStorageOptions = &dlppb.CloudStorageOptions{
		FileSet: fileSet,
	}
	var storageConfig = &dlppb.StorageConfig{
		Type: &dlppb.StorageConfig_CloudStorageOptions{
			CloudStorageOptions: cloudStorageOptions,
		},
	}
	var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
	var request = &dlppb.CreateInspectOperationRequest{
		InspectConfig: inspectConfig,

@@ -352,8 +622,26 @@ func TestDlpServiceCreateInspectOperationError(t *testing.T) {
		},
	})

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{}
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var url string = "gs://example_bucket/example_file.png"
	var fileSet = &dlppb.CloudStorageOptions_FileSet{
		Url: url,
	}
	var cloudStorageOptions = &dlppb.CloudStorageOptions{
		FileSet: fileSet,
	}
	var storageConfig = &dlppb.StorageConfig{
		Type: &dlppb.StorageConfig_CloudStorageOptions{
			CloudStorageOptions: cloudStorageOptions,
		},
	}
	var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
	var request = &dlppb.CreateInspectOperationRequest{
		InspectConfig: inspectConfig,

@@ -446,8 +734,8 @@ func TestDlpServiceListInfoTypes(t *testing.T) {

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var category string = "category50511102"
	var languageCode string = "languageCode-412800396"
	var category string = "PII"
	var languageCode string = "en"
	var request = &dlppb.ListInfoTypesRequest{
		Category:     category,
		LanguageCode: languageCode,

@@ -477,8 +765,8 @@ func TestDlpServiceListInfoTypesError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var category string = "category50511102"
	var languageCode string = "languageCode-412800396"
	var category string = "PII"
	var languageCode string = "en"
	var request = &dlppb.ListInfoTypesRequest{
		Category:     category,
		LanguageCode: languageCode,

@@ -506,7 +794,7 @@ func TestDlpServiceListRootCategories(t *testing.T) {

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var languageCode string = "languageCode-412800396"
	var languageCode string = "en"
	var request = &dlppb.ListRootCategoriesRequest{
		LanguageCode: languageCode,
	}

@@ -535,7 +823,7 @@ func TestDlpServiceListRootCategoriesError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var languageCode string = "languageCode-412800396"
	var languageCode string = "en"
	var request = &dlppb.ListRootCategoriesRequest{
		LanguageCode: languageCode,
	}
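Throughout these tests the shared mock is reset with mockDlp.resps = append(mockDlp.resps[:0], expectedResponse). Slicing to [:0] drops the old elements while keeping the backing array, so each test starts with exactly one canned response and no reallocation. A small demonstration of the truncate-and-reuse idiom:

package main

import "fmt"

func main() {
	resps := []interface{}{"old-1", "old-2", "old-3"}
	// resps[:0] has length 0 but keeps the capacity, so append overwrites
	// slot 0 of the existing backing array instead of allocating.
	resps = append(resps[:0], "fresh")
	fmt.Println(len(resps), cap(resps), resps[0]) // 1 3 fresh
}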
18
vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go
generated
vendored
@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package errorreporting is an experimental, auto-generated package for the
// Package errorreporting is an auto-generated package for the
// Stackdriver Error Reporting API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
//
// Stackdriver Error Reporting groups and counts similar errors from cloud
// services. The Stackdriver Error Reporting API provides a way to report new
// errors and read access to error groups and their associated errors.

@@ -29,11 +31,15 @@ import (
	"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val []string) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx)
	md = md.Copy()
	md["x-goog-api-client"] = val
	return metadata.NewOutgoingContext(ctx, md)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
9
vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go
generated
vendored
@@ -27,6 +27,7 @@ import (
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient.

@@ -75,7 +76,7 @@ type ErrorGroupClient struct {
	CallOptions *ErrorGroupCallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	Metadata metadata.MD
}

// NewErrorGroupClient creates a new error group service client.

@@ -113,7 +114,7 @@ func (c *ErrorGroupClient) Close() error {
func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ErrorGroupGroupPath returns the path for the group resource.

@@ -128,7 +129,7 @@ func ErrorGroupGroupPath(project, group string) string {

// GetGroup get the specified group.
func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
	var resp *clouderrorreportingpb.ErrorGroup
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -145,7 +146,7 @@ func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportin
// UpdateGroup replace the data for the specified group.
// Fails if the group does not exist.
func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
	var resp *clouderrorreportingpb.ErrorGroup
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
11
vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go
generated
vendored
@@ -29,6 +29,7 @@ import (
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient.

@@ -79,7 +80,7 @@ type ErrorStatsClient struct {
	CallOptions *ErrorStatsCallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	Metadata metadata.MD
}

// NewErrorStatsClient creates a new error stats service client.

@@ -118,7 +119,7 @@ func (c *ErrorStatsClient) Close() error {
func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ErrorStatsProjectPath returns the path for the project resource.

@@ -131,7 +132,7 @@ func ErrorStatsProjectPath(project string) string {

// ListGroupStats lists the specified groups.
func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...)
	it := &ErrorGroupStatsIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) {

@@ -166,7 +167,7 @@ func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorre

// ListEvents lists the specified events.
func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...)
	it := &ErrorEventIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) {

@@ -201,7 +202,7 @@ func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreport

// DeleteEvents deletes all error events of a given project.
func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...)
	var resp *clouderrorreportingpb.DeleteEventsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
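ListGroupStats and ListEvents return iterators whose pages are fetched lazily through the InternalFetch closures shown above; callers consume them with Next until iterator.Done. A hedged usage sketch; the project name is a placeholder:

package main

import (
	"fmt"
	"log"

	errorstats "cloud.google.com/go/errorreporting/apiv1beta1"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

func main() {
	ctx := context.Background()
	c, err := errorstats.NewErrorStatsClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	it := c.ListGroupStats(ctx, &clouderrorreportingpb.ListGroupStatsRequest{
		ProjectName: "projects/my-project", // placeholder project
	})
	for {
		stats, err := it.Next()
		if err == iterator.Done {
			break // all pages consumed
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(stats.Count)
	}
}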
7
vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go
generated
vendored
@@ -24,6 +24,7 @@ import (
	"google.golang.org/api/transport"
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient.

@@ -57,7 +58,7 @@ type ReportErrorsClient struct {
	CallOptions *ReportErrorsCallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	Metadata metadata.MD
}

// NewReportErrorsClient creates a new report errors service client.

@@ -95,7 +96,7 @@ func (c *ReportErrorsClient) Close() error {
func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ReportErrorsProjectPath returns the path for the project resource.

@@ -114,7 +115,7 @@ func ReportErrorsProjectPath(project string) string {
// for authentication. To use an API key, append it to the URL as the value of
// a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)
	var resp *clouderrorreportingpb.ReportErrorEventResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
19
vendor/cloud.google.com/go/errorreporting/errors.go
generated
vendored
@@ -17,25 +17,6 @@
// This package is still experimental and subject to change.
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function.
//
//	import er "cloud.google.com/go/errorreporting"
//	...
//	errorsClient, err = er.NewClient(ctx, projectID, er.Config{
//		ServiceName:    "myservice",
//		ServiceVersion: "v1.0",
//	})
//
// With a client, you can then report errors:
//
//	if err != nil {
//		errorsClient.Report(ctx, er.Entry{Error: err})
//	}
//
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
package errorreporting // import "cloud.google.com/go/errorreporting"

import (
49
vendor/cloud.google.com/go/errorreporting/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package errorreporting_test

import (
	"errors"
	"log"

	"cloud.google.com/go/errorreporting"
	"golang.org/x/net/context"
)

func Example() {
	// Create the client.
	ctx := context.Background()
	ec, err := errorreporting.NewClient(ctx, "my-gcp-project", errorreporting.Config{
		ServiceName:    "myservice",
		ServiceVersion: "v1.0",
	})
	defer func() {
		if err := ec.Close(); err != nil {
			log.Printf("failed to report errors to Stackdriver: %v", err)
		}
	}()

	// Report an error.
	err = doSomething()
	if err != nil {
		ec.Report(errorreporting.Entry{
			Error: err,
		})
	}
}

func doSomething() error {
	return errors.New("something went wrong")
}
16
vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go
generated
vendored
|
@@ -21,18 +21,22 @@
//
//
// Use the client at cloud.google.com/go/firestore in preference to this.
package firestore
package firestore // import "cloud.google.com/go/firestore/apiv1beta1"

import (
    "golang.org/x/net/context"
    "google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val []string) context.Context {
    md, _ := metadata.FromOutgoingContext(ctx)
    md = md.Copy()
    md["x-goog-api-client"] = val
    return metadata.NewOutgoingContext(ctx, md)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
    out, _ := metadata.FromOutgoingContext(ctx)
    out = out.Copy()
    for _, md := range mds {
        for k, v := range md {
            out[k] = append(out[k], v...)
        }
    }
    return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
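A note on the helper swap above: the old insertXGoog replaced the x-goog-api-client key outright, while the new insertMetadata copies the outgoing metadata and appends every key/value pair from each supplied metadata.MD, so caller-attached headers survive. A minimal standalone sketch of that merge behavior (the header values caller/1.0 and gapic/2.0 are invented for illustration):

package main

import (
    "fmt"

    "golang.org/x/net/context"
    "google.golang.org/grpc/metadata"
)

// insertMetadata mirrors the generated helper: copy the outgoing metadata,
// then append rather than overwrite, preserving existing values.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
    out, _ := metadata.FromOutgoingContext(ctx)
    out = out.Copy()
    for _, md := range mds {
        for k, v := range md {
            out[k] = append(out[k], v...)
        }
    }
    return metadata.NewOutgoingContext(ctx, out)
}

func main() {
    // Metadata the caller attached before invoking the client.
    ctx := metadata.NewOutgoingContext(context.Background(),
        metadata.Pairs("x-goog-api-client", "caller/1.0"))
    // The client's own header is merged in, not substituted.
    ctx = insertMetadata(ctx, metadata.Pairs("x-goog-api-client", "gapic/2.0"))

    md, _ := metadata.FromOutgoingContext(ctx)
    fmt.Println(md["x-goog-api-client"]) // [caller/1.0 gapic/2.0]
}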
34  vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go  generated  vendored
@@ -21,15 +21,15 @@ import (
    "time"

    "cloud.google.com/go/internal/version"
    firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"

    gax "github.com/googleapis/gax-go"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
    "google.golang.org/api/transport"
    firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.

@@ -112,7 +112,7 @@ type Client struct {
    CallOptions *CallOptions

    // The metadata to be sent with each request.
    xGoogHeader []string
    Metadata metadata.MD
}

// NewClient creates a new firestore client.

@@ -168,7 +168,7 @@ func (c *Client) Close() error {
func (c *Client) SetGoogleClientInfo(keyval ...string) {
    kv := append([]string{"gl-go", version.Go()}, keyval...)
    kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
    c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
    c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// DatabaseRootPath returns the path for the database root resource.

@@ -220,7 +220,7 @@ func AnyPathPath(project, database, document, anyPath string) string {

// GetDocument gets a single document.
func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.GetDocument[0:len(c.CallOptions.GetDocument):len(c.CallOptions.GetDocument)], opts...)
    var resp *firestorepb.Document
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -236,7 +236,7 @@ func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRe

// ListDocuments lists documents.
func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest, opts ...gax.CallOption) *DocumentIterator {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.ListDocuments[0:len(c.CallOptions.ListDocuments):len(c.CallOptions.ListDocuments)], opts...)
    it := &DocumentIterator{}
    it.InternalFetch = func(pageSize int, pageToken string) ([]*firestorepb.Document, string, error) {

@@ -271,7 +271,7 @@ func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumen

// CreateDocument creates a new document.
func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.CreateDocument[0:len(c.CallOptions.CreateDocument):len(c.CallOptions.CreateDocument)], opts...)
    var resp *firestorepb.Document
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -287,7 +287,7 @@ func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocu

// UpdateDocument updates or inserts a document.
func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.UpdateDocument[0:len(c.CallOptions.UpdateDocument):len(c.CallOptions.UpdateDocument)], opts...)
    var resp *firestorepb.Document
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -303,7 +303,7 @@ func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocu

// DeleteDocument deletes a document.
func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest, opts ...gax.CallOption) error {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.DeleteDocument[0:len(c.CallOptions.DeleteDocument):len(c.CallOptions.DeleteDocument)], opts...)
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error

@@ -318,7 +318,7 @@ func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocu
// Documents returned by this method are not guaranteed to be returned in the
// same order that they were requested.
func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGetDocumentsRequest, opts ...gax.CallOption) (firestorepb.Firestore_BatchGetDocumentsClient, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.BatchGetDocuments[0:len(c.CallOptions.BatchGetDocuments):len(c.CallOptions.BatchGetDocuments)], opts...)
    var resp firestorepb.Firestore_BatchGetDocumentsClient
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -334,7 +334,7 @@ func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGe

// BeginTransaction starts a new transaction.
func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest, opts ...gax.CallOption) (*firestorepb.BeginTransactionResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...)
    var resp *firestorepb.BeginTransactionResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -350,7 +350,7 @@ func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTra

// Commit commits a transaction, while optionally updating documents.
func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opts ...gax.CallOption) (*firestorepb.CommitResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...)
    var resp *firestorepb.CommitResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -366,7 +366,7 @@ func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opt

// Rollback rolls back a transaction.
func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest, opts ...gax.CallOption) error {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...)
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error

@@ -378,7 +378,7 @@ func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest,

// RunQuery runs a query.
func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest, opts ...gax.CallOption) (firestorepb.Firestore_RunQueryClient, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.RunQuery[0:len(c.CallOptions.RunQuery):len(c.CallOptions.RunQuery)], opts...)
    var resp firestorepb.Firestore_RunQueryClient
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -394,7 +394,7 @@ func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest,

// Write streams batches of document updates and deletes, in order.
func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_WriteClient, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.Write[0:len(c.CallOptions.Write):len(c.CallOptions.Write)], opts...)
    var resp firestorepb.Firestore_WriteClient
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -410,7 +410,7 @@ func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb

// Listen listens to changes.
func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_ListenClient, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.Listen[0:len(c.CallOptions.Listen):len(c.CallOptions.Listen)], opts...)
    var resp firestorepb.Firestore_ListenClient
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -426,7 +426,7 @@ func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorep

// ListCollectionIds lists all the collection IDs underneath a document.
func (c *Client) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest, opts ...gax.CallOption) *StringIterator {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.ListCollectionIds[0:len(c.CallOptions.ListCollectionIds):len(c.CallOptions.ListCollectionIds)], opts...)
    it := &StringIterator{}
    it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
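Every method above clones its per-call options with a three-index slice expression, e.g. c.CallOptions.GetDocument[0:len(x):len(x)], before appending. Capping capacity at the length forces append to allocate a fresh backing array, so per-call opts can never scribble over the shared CallOptions slice. A small self-contained illustration of the difference:

package main

import "fmt"

func main() {
    shared := make([]string, 2, 4)
    shared[0], shared[1] = "retry", "timeout"

    // Plain slice: spare capacity remains, so append writes into the
    // shared backing array.
    a := append(shared[0:2], "extra-A")

    // Full slice expression s[low:high:max]: capacity == length, so
    // append must copy, leaving the shared array untouched.
    b := append(shared[0:2:2], "extra-B")

    fmt.Println(a[2], b[2])                           // extra-A extra-B
    fmt.Println(cap(shared[0:2]), cap(shared[0:2:2])) // 4 2
}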
5  vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go  generated  vendored
@@ -19,11 +19,10 @@ package firestore_test
import (
    "io"

    firestore "cloud.google.com/go/firestore/apiv1beta1"
    firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"

    "cloud.google.com/go/firestore/apiv1beta1"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
)

func ExampleNewClient() {
3  vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go  generated  vendored
@@ -17,9 +17,8 @@
package firestore

import (
    firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"

    emptypb "github.com/golang/protobuf/ptypes/empty"
    firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
)

import (
25  vendor/cloud.google.com/go/firestore/docref.go  generated  vendored
@@ -148,10 +148,10 @@ func (d *DocumentRef) newReplaceWrites(data interface{}, opts []SetOption, p Pre
        return nil, errNilDocRef
    }
    origFieldPaths, allPaths, err := processSetOptions(opts)
    isMerge := len(origFieldPaths) > 0 || allPaths // was some Merge option specified?
    if err != nil {
        return nil, err
    }
    isMerge := len(origFieldPaths) > 0 || allPaths // was some Merge option specified?
    doc, serverTimestampPaths, err := toProtoDocument(data)
    if err != nil {
        return nil, err

@@ -214,7 +214,7 @@ func (d *DocumentRef) newReplaceWrites(data interface{}, opts []SetOption, p Pre
        // There were field paths, but they all got removed.
        // The write does nothing but enforce the precondition.
        w = &pb.Write{CurrentDocument: pc}
    case !isMerge:
    case !isMerge && (pc != nil || doc.Fields != nil):
        // Set without merge, so no update mask.
        w = &pb.Write{
            Operation: &pb.Write_Update{doc},

@@ -411,19 +411,30 @@ func (d *DocumentRef) newTransform(serverTimestampFieldPaths []FieldPath) *pb.Wr
    }
}

var (
type sentinel int

const (
    // Delete is used as a value in a call to UpdateMap to indicate that the
    // corresponding key should be deleted.
    Delete = new(int)
    // Not new(struct{}), because addresses of zero-sized values
    // may not be unique.
    Delete sentinel = iota

    // ServerTimestamp is used as a value in a call to UpdateMap to indicate that the
    // key's value should be set to the time at which the server processed
    // the request.
    ServerTimestamp = new(int)
    ServerTimestamp
)

func (s sentinel) String() string {
    switch s {
    case Delete:
        return "Delete"
    case ServerTimestamp:
        return "ServerTimestamp"
    default:
        return "<?sentinel?>"
    }
}

// UpdateMap updates the document using the given data. Map keys replace the stored
// values, but other fields of the stored document are untouched.
// See DocumentRef.Create for acceptable map values.
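The docref.go hunk above replaces the pointer sentinels Delete = new(int) and ServerTimestamp = new(int) with a typed iota enum. Pointer sentinels compare only by address (and, as the new comment notes, new(struct{}) was never an option because addresses of zero-sized values may coincide), and they print as raw hex. A value-typed sentinel keeps interface-comparison semantics while gaining a readable String method. A minimal sketch contrasting the two:

package main

import "fmt"

// Old style: identity is the pointer's address; printing yields hex.
var deletePtr = new(int)

// New style: a named type whose constants compare by value and can
// implement fmt.Stringer.
type sentinel int

const (
    Delete sentinel = iota
    ServerTimestamp
)

func (s sentinel) String() string {
    switch s {
    case Delete:
        return "Delete"
    case ServerTimestamp:
        return "ServerTimestamp"
    default:
        return "<?sentinel?>"
    }
}

func main() {
    fmt.Println(deletePtr) // an address such as 0xc000014090
    fmt.Println(Delete)    // "Delete"

    // Interface comparison still works for map-value markers.
    m := map[string]interface{}{"a": Delete}
    fmt.Println(m["a"] == Delete) // true
}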
20  vendor/cloud.google.com/go/firestore/docref_test.go  generated  vendored
@@ -162,14 +162,26 @@ func TestDocSet(t *testing.T) {
            write:     map[string]*pb.Value{"a": intval(1)},
            transform: []string{"b"},
        },
        {
            desc:      "a ServerTimestamp alone",
            data:      map[string]interface{}{"b": ServerTimestamp},
            write:     nil,
            transform: []string{"b"},
        },
        {
            desc:      "a ServerTimestamp alone with a path",
            data:      map[string]interface{}{"b": ServerTimestamp},
            opt:       MergePaths([]string{"b"}),
            write:     nil,
            transform: []string{"b"},
        },
        {
            desc: "nested ServerTimestamp field",
            data: map[string]interface{}{
                "a": 1,
                "b": map[string]interface{}{"c": ServerTimestamp},
            },
            // TODO(jba): make this be map[string]*pb.Value{"a": intval(1)},
            write:     map[string]*pb.Value{"a": intval(1), "b": mapval(map[string]*pb.Value{})},
            write:     map[string]*pb.Value{"a": intval(1)},
            transform: []string{"b.c"},
        },
        {

@@ -179,9 +191,7 @@ func TestDocSet(t *testing.T) {
                "b": ServerTimestamp,
                "c": map[string]interface{}{"d": ServerTimestamp},
            },
            // TODO(jba): make this be map[string]*pb.Value{"a": intval(1)},
            write: map[string]*pb.Value{"a": intval(1),
                "c": mapval(map[string]*pb.Value{})},
            write:     map[string]*pb.Value{"a": intval(1)},
            transform: []string{"b", "c.d"},
        },
        {
23  vendor/cloud.google.com/go/firestore/document.go  generated  vendored
@@ -156,19 +156,26 @@ func toProtoDocument(x interface{}) (*pb.Document, []FieldPath, error) {
        return nil, nil, errors.New("firestore: nil document contents")
    }
    v := reflect.ValueOf(x)
    pv, err := toProtoValue(v)
    pv, sawTransform, err := toProtoValue(v)
    if err != nil {
        return nil, nil, err
    }
    fieldPaths, err := extractTransformPaths(v, nil)
    if err != nil {
        return nil, nil, err
    var fieldPaths []FieldPath
    if sawTransform {
        fieldPaths, err = extractTransformPaths(v, nil)
        if err != nil {
            return nil, nil, err
        }
    }
    m := pv.GetMapValue()
    if m == nil {
        return nil, nil, fmt.Errorf("firestore: cannot covert value of type %T into a map", x)
    var fields map[string]*pb.Value
    if pv != nil {
        m := pv.GetMapValue()
        if m == nil {
            return nil, nil, fmt.Errorf("firestore: cannot covert value of type %T into a map", x)
        }
        fields = m.Fields
    }
    return &pb.Document{Fields: m.Fields}, fieldPaths, nil
    return &pb.Document{Fields: fields}, fieldPaths, nil
}

func extractTransformPaths(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
9  vendor/cloud.google.com/go/firestore/integration_test.go  generated  vendored
@@ -217,6 +217,9 @@ func TestIntegration_Create(t *testing.T) {
    checkTimeBetween(t, wr.UpdateTime, start, end)
    _, err := doc.Create(ctx, integrationTestMap)
    codeEq(t, "Create on a present doc", codes.AlreadyExists, err)
    // OK to create an empty document.
    _, err = integrationColl(t).NewDoc().Create(ctx, map[string]interface{}{})
    codeEq(t, "Create empty doc", codes.OK, err)
}

func TestIntegration_Get(t *testing.T) {

@@ -234,8 +237,6 @@ func TestIntegration_Get(t *testing.T) {
    if want := wantIntegrationTestMap; !testEqual(got, want) {
        t.Errorf("got\n%v\nwant\n%v", pretty.Value(got), pretty.Value(want))
    }

    //
    _, err = integrationColl(t).NewDoc().Get(ctx)
    codeEq(t, "Get on a missing doc", codes.NotFound, err)
}

@@ -425,6 +426,8 @@ func TestIntegration_UpdateMap(t *testing.T) {
        er(doc.UpdateMap(ctx, um, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond)))))
    codeEq(t, "UpdateMap with right LastUpdateTime", codes.OK,
        er(doc.UpdateMap(ctx, um, LastUpdateTime(wr.UpdateTime))))
    codeEq(t, "just server transform", codes.OK,
        er(doc.UpdateMap(ctx, map[string]interface{}{"a": ServerTimestamp})))
}

func TestIntegration_UpdateStruct(t *testing.T) {

@@ -945,7 +948,7 @@ func copyMap(m map[string]interface{}) map[string]interface{} {

func checkTimeBetween(t *testing.T, got, low, high time.Time) {
    // Allow slack for clock skew.
    const slack = 1 * time.Second
    const slack = 2 * time.Second
    low = low.Add(-slack)
    high = high.Add(slack)
    if got.Before(low) || got.After(high) {
5  vendor/cloud.google.com/go/firestore/mock_test.go  generated  vendored
@@ -83,8 +83,9 @@ func (s *mockServer) popRPC(gotReq proto.Message) (interface{}, error) {
        ri.adjust(gotReq)
    }
    if !proto.Equal(gotReq, ri.wantReq) {
        return nil, fmt.Errorf("mockServer: bad request\ngot: %T\n%+v\nwant: %T\n%+v",
            gotReq, gotReq, ri.wantReq, ri.wantReq)
        return nil, fmt.Errorf("mockServer: bad request\ngot: %T\n%s\nwant: %T\n%s",
            gotReq, proto.MarshalTextString(gotReq),
            ri.wantReq, proto.MarshalTextString(ri.wantReq))
    }
}
resp := s.resps[0]
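The mock-server change swaps %+v for proto.MarshalTextString in the mismatch message, trading one-line struct dumps for canonical proto text that is easier to diff by eye. A small sketch of the output difference using a well-known message type (the duration value is arbitrary):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
    d := &durpb.Duration{Seconds: 3, Nanos: 500}

    // %+v flattens the message onto a single line.
    fmt.Printf("%+v\n", d)

    // MarshalTextString emits one field per line in proto text format,
    // which is much easier to compare in a failing test's output.
    fmt.Print(proto.MarshalTextString(d))
}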
11  vendor/cloud.google.com/go/firestore/query.go  generated  vendored
@@ -277,10 +277,14 @@ func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error)
            }
            vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.parentPath + "/documents/" + q.collectionID + "/" + docID}}
        } else {
            vals[i], err = toProtoValue(reflect.ValueOf(fval))
            var sawTransform bool
            vals[i], sawTransform, err = toProtoValue(reflect.ValueOf(fval))
            if err != nil {
                return nil, err
            }
            if sawTransform {
                return nil, errors.New("firestore: ServerTimestamp disallowed in query value")
            }
        }
    }
    return vals, nil

@@ -311,10 +315,13 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
    default:
        return nil, fmt.Errorf("firestore: invalid operator %q", f.op)
    }
    val, err := toProtoValue(reflect.ValueOf(f.value))
    val, sawTransform, err := toProtoValue(reflect.ValueOf(f.value))
    if err != nil {
        return nil, err
    }
    if sawTransform {
        return nil, errors.New("firestore: ServerTimestamp disallowed in query value")
    }
    return &pb.StructuredQuery_Filter{
        FilterType: &pb.StructuredQuery_Filter_FieldFilter{
            &pb.StructuredQuery_FieldFilter{
8  vendor/cloud.google.com/go/firestore/query_test.go  generated  vendored
@@ -228,6 +228,8 @@ func fref1(s string) *pb.StructuredQuery_FieldReference {
}

func TestQueryToProtoErrors(t *testing.T) {
    st := map[string]interface{}{"a": ServerTimestamp}
    del := map[string]interface{}{"a": Delete}
    q := (&Client{}).Collection("C").Query
    for _, query := range []Query{
        Query{}, // no collection ID

@@ -240,6 +242,12 @@ func TestQueryToProtoErrors(t *testing.T) {
        q.SelectPaths([]string{"/", "", "~"}), // invalid path
        q.OrderBy("[", Asc),                   // invalid path
        q.OrderByPath([]string{""}, Desc),     // invalid path
        q.Where("x", "==", st),                // ServerTimestamp in filter
        q.OrderBy("a", Asc).StartAt(st),       // ServerTimestamp in Start
        q.OrderBy("a", Asc).EndAt(st),         // ServerTimestamp in End
        q.Where("x", "==", del),               // Delete in filter
        q.OrderBy("a", Asc).StartAt(del),      // Delete in Start
        q.OrderBy("a", Asc).EndAt(del),        // Delete in End
    } {
        _, err := query.toProto()
        if err == nil {
106  vendor/cloud.google.com/go/firestore/to_value.go  generated  vendored
@@ -41,40 +41,44 @@ var (
// toProtoValue converts a Go value to a Firestore Value protobuf.
// Some corner cases:
// - All nils (nil interface, nil slice, nil map, nil pointer) are converted to
//   a NullValue (not a nil *pb.Value). toProtoValue never returns (nil, nil).
//   a NullValue (not a nil *pb.Value). toProtoValue never returns (nil, false, nil).
//   It returns (nil, true, nil) if everything in the value is ServerTimestamp.
// - An error is returned for uintptr, uint and uint64, because Firestore uses
//   an int64 to represent integral values, and those types can't be properly
//   represented in an int64.
// - An error is returned for the special Delete value.
func toProtoValue(v reflect.Value) (*pb.Value, error) {
func toProtoValue(v reflect.Value) (pbv *pb.Value, sawServerTimestamp bool, err error) {
    if !v.IsValid() {
        return nullValue, nil
        return nullValue, false, nil
    }
    vi := v.Interface()
    if vi == Delete {
        return nil, errors.New("firestore: cannot use Delete in value")
        return nil, false, errors.New("firestore: cannot use Delete in value")
    }
    if vi == ServerTimestamp {
        return nil, false, errors.New("firestore: must use ServerTimestamp as a map value")
    }
    switch x := vi.(type) {
    case []byte:
        return &pb.Value{&pb.Value_BytesValue{x}}, nil
        return &pb.Value{&pb.Value_BytesValue{x}}, false, nil
    case time.Time:
        ts, err := ptypes.TimestampProto(x)
        if err != nil {
            return nil, err
            return nil, false, err
        }
        return &pb.Value{&pb.Value_TimestampValue{ts}}, nil
        return &pb.Value{&pb.Value_TimestampValue{ts}}, false, nil
    case *latlng.LatLng:
        if x == nil {
            // gRPC doesn't like nil oneofs. Use NullValue.
            return nullValue, nil
            return nullValue, false, nil
        }
        return &pb.Value{&pb.Value_GeoPointValue{x}}, nil
        return &pb.Value{&pb.Value_GeoPointValue{x}}, false, nil
    case *DocumentRef:
        if x == nil {
            // gRPC doesn't like nil oneofs. Use NullValue.
            return nullValue, nil
            return nullValue, false, nil
        }
        return &pb.Value{&pb.Value_ReferenceValue{x.Path}}, nil
        return &pb.Value{&pb.Value_ReferenceValue{x.Path}}, false, nil
    // Do not add bool, string, int, etc. to this switch; leave them in the
    // reflect-based switch below. Moving them here would drop support for
    // types whose underlying types are those primitives.

@@ -83,15 +87,15 @@ func toProtoValue(v reflect.Value) (*pb.Value, error) {
    }
    switch v.Kind() {
    case reflect.Bool:
        return &pb.Value{&pb.Value_BooleanValue{v.Bool()}}, nil
        return &pb.Value{&pb.Value_BooleanValue{v.Bool()}}, false, nil
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return &pb.Value{&pb.Value_IntegerValue{v.Int()}}, nil
        return &pb.Value{&pb.Value_IntegerValue{v.Int()}}, false, nil
    case reflect.Uint8, reflect.Uint16, reflect.Uint32:
        return &pb.Value{&pb.Value_IntegerValue{int64(v.Uint())}}, nil
        return &pb.Value{&pb.Value_IntegerValue{int64(v.Uint())}}, false, nil
    case reflect.Float32, reflect.Float64:
        return &pb.Value{&pb.Value_DoubleValue{v.Float()}}, nil
        return &pb.Value{&pb.Value_DoubleValue{v.Float()}}, false, nil
    case reflect.String:
        return &pb.Value{&pb.Value_StringValue{v.String()}}, nil
        return &pb.Value{&pb.Value_StringValue{v.String()}}, false, nil
    case reflect.Slice:
        return sliceToProtoValue(v)
    case reflect.Map:

@@ -100,7 +104,7 @@ func toProtoValue(v reflect.Value) (*pb.Value, error) {
        return structToProtoValue(v)
    case reflect.Ptr:
        if v.IsNil() {
            return nullValue, nil
            return nullValue, false, nil
        }
        return toProtoValue(v.Elem())
    case reflect.Interface:

@@ -110,71 +114,105 @@ func toProtoValue(v reflect.Value) (*pb.Value, error) {
        fallthrough // any other interface value is an error

    default:
        return nil, fmt.Errorf("firestore: cannot convert type %s to value", v.Type())
        return nil, false, fmt.Errorf("firestore: cannot convert type %s to value", v.Type())
    }
}

func sliceToProtoValue(v reflect.Value) (*pb.Value, error) {
func sliceToProtoValue(v reflect.Value) (*pb.Value, bool, error) {
    // A nil slice is converted to a null value.
    if v.IsNil() {
        return nullValue, nil
        return nullValue, false, nil
    }
    vals := make([]*pb.Value, v.Len())
    for i := 0; i < v.Len(); i++ {
        val, err := toProtoValue(v.Index(i))
        val, sawServerTimestamp, err := toProtoValue(v.Index(i))
        if err != nil {
            return nil, err
            return nil, false, err
        }
        if sawServerTimestamp {
            return nil, false, errors.New("firestore: ServerTimestamp cannot occur in an array")
        }
        vals[i] = val
    }
    return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{vals}}}, nil
    return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{vals}}}, false, nil
}

func mapToProtoValue(v reflect.Value) (*pb.Value, error) {
func mapToProtoValue(v reflect.Value) (*pb.Value, bool, error) {
    if v.Type().Key().Kind() != reflect.String {
        return nil, errors.New("firestore: map key type must be string")
        return nil, false, errors.New("firestore: map key type must be string")
    }
    // A nil map is converted to a null value.
    if v.IsNil() {
        return nullValue, nil
        return nullValue, false, nil
    }
    m := map[string]*pb.Value{}
    sawServerTimestamp := false
    for _, k := range v.MapKeys() {
        mi := v.MapIndex(k)
        if mi.Interface() == ServerTimestamp {
            sawServerTimestamp = true
            continue
        }
        val, err := toProtoValue(mi)
        val, sst, err := toProtoValue(mi)
        if err != nil {
            return nil, err
            return nil, false, err
        }
        if sst {
            sawServerTimestamp = true
        }
        if val == nil { // value was a map with all ServerTimestamp values
            continue
        }
        m[k.String()] = val
    }
    return &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}, nil
    var pv *pb.Value
    if len(m) == 0 && sawServerTimestamp {
        // The entire map consisted of ServerTimestamp values.
        pv = nil
    } else {
        pv = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}
    }
    return pv, sawServerTimestamp, nil
}

func structToProtoValue(v reflect.Value) (*pb.Value, error) {
func structToProtoValue(v reflect.Value) (*pb.Value, bool, error) {
    m := map[string]*pb.Value{}
    fields, err := fieldCache.Fields(v.Type())
    if err != nil {
        return nil, err
        return nil, false, err
    }
    sawServerTimestamp := false
    for _, f := range fields {
        fv := v.FieldByIndex(f.Index)
        opts := f.ParsedTag.(tagOptions)
        if opts.serverTimestamp {
            // TODO(jba): should we return a non-zero time?
            sawServerTimestamp = true
            continue
        }
        if opts.omitEmpty && isEmptyValue(fv) {
            continue
        }
        val, err := toProtoValue(fv)
        val, sst, err := toProtoValue(fv)
        if err != nil {
            return nil, err
            return nil, false, err
        }
        if sst {
            sawServerTimestamp = true
        }
        if val == nil { // value was a map with all ServerTimestamp values
            continue
        }
        m[f.Name] = val
    }
    return &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}, nil
    var pv *pb.Value
    if len(m) == 0 && sawServerTimestamp {
        // The entire struct consisted of ServerTimestamp or omitempty values.
        pv = nil
    } else {
        pv = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}
    }
    return pv, sawServerTimestamp, nil
}

type tagOptions struct {
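The core of the to_value.go change: conversion now threads a sawServerTimestamp boolean through every recursive call, and a map or struct whose fields were all ServerTimestamp collapses to a nil value so that no empty MapValue is written. A stripped-down sketch of the same pruning logic over plain nested maps (no protobufs; the sentinel type stands in for firestore's ServerTimestamp):

package main

import "fmt"

type sentinel int

const ServerTimestamp sentinel = 0

// prune mirrors mapToProtoValue's bookkeeping: remove ServerTimestamp
// leaves, report whether any were seen, and return nil when the whole
// map consisted of sentinels.
func prune(m map[string]interface{}) (map[string]interface{}, bool) {
    out := map[string]interface{}{}
    saw := false
    for k, v := range m {
        if v == ServerTimestamp {
            saw = true
            continue
        }
        if sub, ok := v.(map[string]interface{}); ok {
            p, sst := prune(sub)
            saw = saw || sst
            if p == nil { // sub-map was all ServerTimestamp values
                continue
            }
            out[k] = p
            continue
        }
        out[k] = v
    }
    if len(out) == 0 && saw {
        return nil, true
    }
    return out, saw
}

func main() {
    m := map[string]interface{}{
        "a": 1,
        "b": map[string]interface{}{"c": ServerTimestamp},
    }
    p, saw := prune(m)
    fmt.Println(p, saw) // map[a:1] true; "b" collapses away entirely
}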
59  vendor/cloud.google.com/go/firestore/to_value_test.go  generated  vendored
@@ -117,8 +117,47 @@ func TestToProtoValue(t *testing.T) {
            },
            refval("projects/P/databases/D/documents/c/d"),
        },
        // ServerTimestamps are removed, possibly leaving nil.
        {map[string]interface{}{"a": ServerTimestamp}, nil},
        {
            map[string]interface{}{
                "a": map[string]interface{}{
                    "b": map[string]interface{}{
                        "c": ServerTimestamp,
                    },
                },
            },
            nil,
        },
        {
            map[string]interface{}{
                "a": map[string]interface{}{
                    "b": map[string]interface{}{
                        "c": ServerTimestamp,
                        "d": ServerTimestamp,
                    },
                },
            },
            nil,
        },
        {
            map[string]interface{}{
                "a": map[string]interface{}{
                    "b": map[string]interface{}{
                        "c": ServerTimestamp,
                        "d": ServerTimestamp,
                        "e": 1,
                    },
                },
            },
            mapval(map[string]*pb.Value{
                "a": mapval(map[string]*pb.Value{
                    "b": mapval(map[string]*pb.Value{"e": intval(1)}),
                }),
            }),
        },
    } {
        got, err := toProtoValue(reflect.ValueOf(test.in))
        got, _, err := toProtoValue(reflect.ValueOf(test.in))
        if err != nil {
            t.Errorf("%v (%T): %v", test.in, test.in, err)
            continue

@@ -139,8 +178,18 @@ func TestToProtoValueErrors(t *testing.T) {
        map[int]bool{},                          // map key type is not string
        make(chan int),                          // can't handle type
        map[string]fmt.Stringer{"a": stringy{}}, // only empty interfaces
        ServerTimestamp,                         // ServerTimestamp can only be a field value
        []interface{}{ServerTimestamp},
        map[string]interface{}{"a": []interface{}{ServerTimestamp}},
        map[string]interface{}{"a": []interface{}{
            map[string]interface{}{"b": ServerTimestamp},
        }},
        Delete, // Delete should never appear
        []interface{}{Delete},
        map[string]interface{}{"a": Delete},
        map[string]interface{}{"a": []interface{}{Delete}},
    } {
        _, err := toProtoValue(reflect.ValueOf(in))
        _, _, err := toProtoValue(reflect.ValueOf(in))
        if err == nil {
            t.Errorf("%v: got nil, want error", in)
        }

@@ -161,7 +210,7 @@ func TestToProtoValueTags(t *testing.T) {
        OmitEmpty:     3,
        OmitEmptyTime: aTime,
    }
    got, err := toProtoValue(reflect.ValueOf(in))
    got, _, err := toProtoValue(reflect.ValueOf(in))
    if err != nil {
        t.Fatal(err)
    }

@@ -174,7 +223,7 @@ func TestToProtoValueTags(t *testing.T) {
        t.Errorf("got %+v, want %+v", got, want)
    }

    got, err = toProtoValue(reflect.ValueOf(testStruct2{}))
    got, _, err = toProtoValue(reflect.ValueOf(testStruct2{}))
    if err != nil {
        t.Fatal(err)
    }

@@ -191,7 +240,7 @@ func TestToProtoValueEmbedded(t *testing.T) {
        *latlng.LatLng
    }

    got, err := toProtoValue(reflect.ValueOf(embed{tm, ll}))
    got, _, err := toProtoValue(reflect.ValueOf(embed{tm, ll}))
    if err != nil {
        t.Fatal(err)
    }
16  vendor/cloud.google.com/go/internal/testutil/unique.go  generated  vendored
@@ -31,17 +31,24 @@ var startTime = time.Now().UTC()
// A UIDSpace manages a set of unique IDs distinguished by a prefix.
type UIDSpace struct {
    Prefix string
    Sep    rune
    re     *regexp.Regexp
    mu     sync.Mutex
    count  int
}

func NewUIDSpace(prefix string) *UIDSpace {
    return NewUIDSpaceSep(prefix, '-')
}

func NewUIDSpaceSep(prefix string, sep rune) *UIDSpace {
    re := fmt.Sprintf(`^%s%[2]c(\d{4})(\d{2})(\d{2})%[2]c(\d+)%[2]c\d+$`,
        regexp.QuoteMeta(prefix), sep)
    return &UIDSpace{
        Prefix: prefix,
        re: regexp.MustCompile("^" + regexp.QuoteMeta(prefix) + `-(\d{4})(\d{2})(\d{2})-(\d+)-\d+$`),
        Sep: sep,
        re:  regexp.MustCompile(re),
    }

}

// New generates a new unique ID. The ID consists of the UIDSpace's prefix, a

@@ -49,7 +56,7 @@ func NewUIDSpace(prefix string) *UIDSpace {
// execution will have the same timestamp.
//
// Aside from the characters in the prefix, IDs contain only letters, numbers
// and hyphens.
// and sep.
func (s *UIDSpace) New() string { return s.newID(startTime) }

func (s *UIDSpace) newID(t time.Time) string {

@@ -62,7 +69,8 @@ func (s *UIDSpace) newID(t time.Time) string {
    y, m, d := t.Date()
    ns := t.Sub(time.Date(y, m, d, 0, 0, 0, 0, time.UTC))
    // Zero-pad the counter for lexical sort order for IDs with the same timestamp.
    return fmt.Sprintf("%s-%04d%02d%02d-%d-%04d", s.Prefix, y, m, d, ns, c)
    return fmt.Sprintf("%s%c%04d%02d%02d%c%d%c%04d",
        s.Prefix, s.Sep, y, m, d, s.Sep, ns, s.Sep, c)
}

// Timestamp extracts the timestamp of uid, which must have been generated by
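NewUIDSpaceSep builds its validation regexp with indexed format verbs: %[2]c re-reads the second argument (the separator rune) at each occurrence, so one argument supplies all three separator positions. A tiny standalone check of the pattern, using the same prefix and separator as the new test below:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    prefix, sep := "prefix2", '_'

    // %s consumes argument 1; each %[2]c re-reads argument 2.
    pat := fmt.Sprintf(`^%s%[2]c(\d{4})(\d{2})(\d{2})%[2]c(\d+)%[2]c\d+$`,
        regexp.QuoteMeta(prefix), sep)
    re := regexp.MustCompile(pat)

    fmt.Println(re.MatchString("prefix2_20170106_21_0000")) // true
}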
7  vendor/cloud.google.com/go/internal/testutil/unique_test.go  generated  vendored
@@ -27,6 +27,13 @@ func TestNew(t *testing.T) {
    if got != want {
        t.Errorf("got %q, want %q", got, want)
    }

    s2 := NewUIDSpaceSep("prefix2", '_')
    got = s2.newID(tm)
    want = "prefix2_20170106_21_0000"
    if got != want {
        t.Errorf("got %q, want %q", got, want)
    }
}

func TestTimestamp(t *testing.T) {
BIN  vendor/cloud.google.com/go/key.json.enc  generated  vendored  (Binary file not shown)
BIN  vendor/cloud.google.com/go/keys.tar.enc  generated  vendored  Normal file  (Binary file not shown)
18  vendor/cloud.google.com/go/language/apiv1beta2/doc.go  generated  vendored
@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package language is an experimental, auto-generated package for the
// Package language is an auto-generated package for the
// Google Cloud Natural Language API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
//
// Google Cloud Natural Language API provides natural language understanding
// technologies to developers. Examples include sentiment analysis, entity
// recognition, and text annotations.

@@ -27,11 +29,15 @@ import (
    "google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val []string) context.Context {
    md, _ := metadata.FromOutgoingContext(ctx)
    md = md.Copy()
    md["x-goog-api-client"] = val
    return metadata.NewOutgoingContext(ctx, md)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
    out, _ := metadata.FromOutgoingContext(ctx)
    out = out.Copy()
    for _, md := range mds {
        for k, v := range md {
            out[k] = append(out[k], v...)
        }
    }
    return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
17  vendor/cloud.google.com/go/language/apiv1beta2/language_client.go  generated  vendored
@@ -27,6 +27,7 @@ import (
    languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.

@@ -83,7 +84,7 @@ type Client struct {
    CallOptions *CallOptions

    // The metadata to be sent with each request.
    xGoogHeader []string
    Metadata metadata.MD
}

// NewClient creates a new language service client.

@@ -122,12 +123,12 @@ func (c *Client) Close() error {
func (c *Client) setGoogleClientInfo(keyval ...string) {
    kv := append([]string{"gl-go", version.Go()}, keyval...)
    kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
    c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
    c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// AnalyzeSentiment analyzes the sentiment of the provided text.
func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSentimentResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.AnalyzeSentiment[0:len(c.CallOptions.AnalyzeSentiment):len(c.CallOptions.AnalyzeSentiment)], opts...)
    var resp *languagepb.AnalyzeSentimentResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -145,7 +146,7 @@ func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSe
// along with entity types, salience, mentions for each entity, and
// other properties.
func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitiesResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.AnalyzeEntities[0:len(c.CallOptions.AnalyzeEntities):len(c.CallOptions.AnalyzeEntities)], opts...)
    var resp *languagepb.AnalyzeEntitiesResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -162,7 +163,7 @@ func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEnt
// AnalyzeEntitySentiment finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes
// sentiment associated with each entity and its mentions.
func (c *Client) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitySentimentResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.AnalyzeEntitySentiment[0:len(c.CallOptions.AnalyzeEntitySentiment):len(c.CallOptions.AnalyzeEntitySentiment)], opts...)
    var resp *languagepb.AnalyzeEntitySentimentResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -180,7 +181,7 @@ func (c *Client) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.Ana
// tokenization along with part of speech tags, dependency trees, and other
// properties.
func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSyntaxResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.AnalyzeSyntax[0:len(c.CallOptions.AnalyzeSyntax):len(c.CallOptions.AnalyzeSyntax)], opts...)
    var resp *languagepb.AnalyzeSyntaxResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -196,7 +197,7 @@ func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSynta

// ClassifyText classifies a document into categories.
func (c *Client) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest, opts ...gax.CallOption) (*languagepb.ClassifyTextResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.ClassifyText[0:len(c.CallOptions.ClassifyText):len(c.CallOptions.ClassifyText)], opts...)
    var resp *languagepb.ClassifyTextResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -213,7 +214,7 @@ func (c *Client) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextR
// AnnotateText a convenience method that provides all syntax, sentiment, entity, and
// classification features in one call.
func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest, opts ...gax.CallOption) (*languagepb.AnnotateTextResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    ctx = insertMetadata(ctx, c.Metadata)
    opts = append(c.CallOptions.AnnotateText[0:len(c.CallOptions.AnnotateText):len(c.CallOptions.AnnotateText)], opts...)
    var resp *languagepb.AnnotateTextResponse
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
205  vendor/cloud.google.com/go/logging/apiv2/config_client.go  generated  vendored
@ -29,15 +29,21 @@ import (
|
|||
loggingpb "google.golang.org/genproto/googleapis/logging/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// ConfigCallOptions contains the retry settings for each method of ConfigClient.
|
||||
type ConfigCallOptions struct {
|
||||
ListSinks []gax.CallOption
|
||||
GetSink []gax.CallOption
|
||||
CreateSink []gax.CallOption
|
||||
UpdateSink []gax.CallOption
|
||||
DeleteSink []gax.CallOption
|
||||
ListSinks []gax.CallOption
|
||||
GetSink []gax.CallOption
|
||||
CreateSink []gax.CallOption
|
||||
UpdateSink []gax.CallOption
|
||||
DeleteSink []gax.CallOption
|
||||
ListExclusions []gax.CallOption
|
||||
GetExclusion []gax.CallOption
|
||||
CreateExclusion []gax.CallOption
|
||||
UpdateExclusion []gax.CallOption
|
||||
DeleteExclusion []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultConfigClientOptions() []option.ClientOption {
|
||||
|
@ -64,11 +70,16 @@ func defaultConfigCallOptions() *ConfigCallOptions {
|
|||
},
|
||||
}
|
||||
return &ConfigCallOptions{
|
||||
ListSinks: retry[[2]string{"default", "idempotent"}],
|
||||
GetSink: retry[[2]string{"default", "idempotent"}],
|
||||
CreateSink: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateSink: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteSink: retry[[2]string{"default", "idempotent"}],
|
||||
ListSinks: retry[[2]string{"default", "idempotent"}],
|
||||
GetSink: retry[[2]string{"default", "idempotent"}],
|
||||
CreateSink: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateSink: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteSink: retry[[2]string{"default", "idempotent"}],
|
||||
ListExclusions: retry[[2]string{"default", "idempotent"}],
|
||||
GetExclusion: retry[[2]string{"default", "idempotent"}],
|
||||
CreateExclusion: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateExclusion: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteExclusion: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -84,7 +95,7 @@ type ConfigClient struct {
|
|||
CallOptions *ConfigCallOptions
|
||||
|
||||
// The metadata to be sent with each request.
|
||||
xGoogHeader []string
|
||||
Metadata metadata.MD
|
||||
}
|
||||
|
||||
// NewConfigClient creates a new config service v2 client.
|
||||
|
@ -123,7 +134,7 @@ func (c *ConfigClient) Close() error {
|
|||
func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
|
||||
c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// ConfigProjectPath returns the path for the project resource.
|
||||
|
@ -144,9 +155,19 @@ func ConfigSinkPath(project, sink string) string {
|
|||
""
|
||||
}
|
||||
|
||||
// ConfigExclusionPath returns the path for the exclusion resource.
|
||||
func ConfigExclusionPath(project, exclusion string) string {
|
||||
return "" +
|
||||
"projects/" +
|
||||
project +
|
||||
"/exclusions/" +
|
||||
exclusion +
|
||||
""
|
||||
}
|
||||
|
||||
// ListSinks lists sinks.
|
||||
func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.Metadata)
|
||||
opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...)
|
||||
it := &LogSinkIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) {
|
||||
|
@ -181,7 +202,7 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe
|
|||
|
||||
// GetSink gets a sink.
|
||||
func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.Metadata)
|
||||
opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...)
|
||||
var resp *loggingpb.LogSink
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -201,7 +222,7 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques
|
|||
// writer_identity is not permitted to write to the destination. A sink can
|
||||
// export log entries only from the resource owning the sink.
|
||||
func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.Metadata)
|
||||
opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...)
|
||||
var resp *loggingpb.LogSink
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -215,16 +236,13 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateSink updates a sink. If the named sink doesn't exist, then this method is
|
||||
// identical to
|
||||
// sinks.create (at /logging/docs/api/reference/rest/v2/projects.sinks/create).
|
||||
// If the named sink does exist, then this method replaces the following
|
||||
// fields in the existing sink with values from the new sink: destination,
|
||||
// filter, output_version_format, start_time, and end_time.
|
||||
// The updated filter might also have a new writer_identity; see the
|
||||
// UpdateSink updates a sink. This method replaces the following fields in the existing
|
||||
// sink with values from the new sink: destination, filter,
|
||||
// output_version_format, start_time, and end_time.
|
||||
// The updated sink might also have a new writer_identity; see the
|
||||
// unique_writer_identity field.
|
||||
func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.Metadata)
|
||||
opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...)
|
||||
var resp *loggingpb.LogSink
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -241,7 +259,7 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink
|
|||
// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that
|
||||
// service account is also deleted.
|
||||
func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.Metadata)
|
||||
opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
@ -251,6 +269,145 @@ func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSink
|
|||
return err
|
||||
}
|
||||
|
||||
// ListExclusions lists all the exclusions in a parent resource.
|
||||
func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest, opts ...gax.CallOption) *LogExclusionIterator {
|
||||
ctx = insertMetadata(ctx, c.Metadata)
|
||||
opts = append(c.CallOptions.ListExclusions[0:len(c.CallOptions.ListExclusions):len(c.CallOptions.ListExclusions)], opts...)
|
||||
it := &LogExclusionIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogExclusion, string, error) {
|
||||
var resp *loggingpb.ListExclusionsResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.ListExclusions(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Exclusions, resp.NextPageToken, nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
	return it
}

// GetExclusion gets the description of an exclusion.
func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.GetExclusion[0:len(c.CallOptions.GetExclusion):len(c.CallOptions.GetExclusion)], opts...)
	var resp *loggingpb.LogExclusion
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.configClient.GetExclusion(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// CreateExclusion creates a new exclusion in a specified parent resource.
// Only log entries belonging to that resource can be excluded.
// You can have up to 10 exclusions in a resource.
func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.CreateExclusion[0:len(c.CallOptions.CreateExclusion):len(c.CallOptions.CreateExclusion)], opts...)
	var resp *loggingpb.LogExclusion
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.configClient.CreateExclusion(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateExclusion changes one or more properties of an existing exclusion.
func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.UpdateExclusion[0:len(c.CallOptions.UpdateExclusion):len(c.CallOptions.UpdateExclusion)], opts...)
	var resp *loggingpb.LogExclusion
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.configClient.UpdateExclusion(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteExclusion deletes an exclusion.
func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeleteExclusion[0:len(c.CallOptions.DeleteExclusion):len(c.CallOptions.DeleteExclusion)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.configClient.DeleteExclusion(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// LogExclusionIterator manages a stream of *loggingpb.LogExclusion.
type LogExclusionIterator struct {
	items    []*loggingpb.LogExclusion
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogExclusion, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *LogExclusionIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *LogExclusionIterator) Next() (*loggingpb.LogExclusion, error) {
	var item *loggingpb.LogExclusion
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *LogExclusionIterator) bufLen() int {
	return len(it.items)
}

func (it *LogExclusionIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// LogSinkIterator manages a stream of *loggingpb.LogSink.
type LogSinkIterator struct {
	items []*loggingpb.LogSink

94
vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go
generated
vendored

@@ -126,3 +126,97 @@ func ExampleConfigClient_DeleteSink() {
		// TODO: Handle error.
	}
}

func ExampleConfigClient_ListExclusions() {
	ctx := context.Background()
	c, err := logging.NewConfigClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &loggingpb.ListExclusionsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListExclusions(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleConfigClient_GetExclusion() {
	ctx := context.Background()
	c, err := logging.NewConfigClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &loggingpb.GetExclusionRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetExclusion(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleConfigClient_CreateExclusion() {
	ctx := context.Background()
	c, err := logging.NewConfigClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &loggingpb.CreateExclusionRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateExclusion(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleConfigClient_UpdateExclusion() {
	ctx := context.Background()
	c, err := logging.NewConfigClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &loggingpb.UpdateExclusionRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateExclusion(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleConfigClient_DeleteExclusion() {
	ctx := context.Background()
	c, err := logging.NewConfigClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &loggingpb.DeleteExclusionRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteExclusion(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
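
Beyond the item-at-a-time iteration shown above, the returned LogExclusionIterator also supports page-at-a-time access through the iterator package. A minimal sketch, not part of the diff, assuming the same imports as the examples above; the parent resource name and page size are illustrative:

func exampleListExclusionsByPage() {
	ctx := context.Background()
	c, err := logging.NewConfigClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	it := c.ListExclusions(ctx, &loggingpb.ListExclusionsRequest{
		Parent: "projects/my-project", // illustrative parent resource
	})
	// iterator.NewPager wraps the iterator to fetch up to 50 items per NextPage call.
	pager := iterator.NewPager(it, 50, "")
	for {
		var page []*loggingpb.LogExclusion
		nextToken, err := pager.NextPage(&page)
		if err != nil {
			// TODO: Handle error.
		}
		_ = page // TODO: Use this page of exclusions.
		if nextToken == "" {
			break
		}
	}
}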

18
vendor/cloud.google.com/go/logging/apiv2/doc.go
generated
vendored

@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

-// Package logging is an experimental, auto-generated package for the
+// Package logging is an auto-generated package for the
// Stackdriver Logging API.
//
+// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
+//
// The Stackdriver Logging API lets you write log entries and manage your
// logs, log sinks and logs-based metrics.
//
@@ -28,11 +30,15 @@ import (
	"google.golang.org/grpc/metadata"
)

-func insertXGoog(ctx context.Context, val []string) context.Context {
-	md, _ := metadata.FromOutgoingContext(ctx)
-	md = md.Copy()
-	md["x-goog-api-client"] = val
-	return metadata.NewOutgoingContext(ctx, md)
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
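
The switch from insertXGoog to insertMetadata changes overwrite semantics to merge semantics for outgoing metadata: values for an existing key are now appended rather than replaced. A self-contained sketch of that behavior, with insertMetadata copied verbatim from the hunk above and illustrative header values:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// insertMetadata is copied from the hunk above so the sketch runs on its own.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

func main() {
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("x-goog-api-client", "gl-go/1.9"))
	ctx = insertMetadata(ctx, metadata.Pairs("x-goog-api-client", "gapic/0.1"))
	md, _ := metadata.FromOutgoingContext(ctx)
	// Both values survive: the new helper merges instead of overwriting.
	fmt.Println(md["x-goog-api-client"]) // [gl-go/1.9 gapic/0.1]
}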

23
vendor/cloud.google.com/go/logging/apiv2/logging_client.go
generated
vendored

@@ -30,6 +30,7 @@ import (
	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
@@ -98,7 +99,7 @@ type Client struct {
	CallOptions *CallOptions

	// The metadata to be sent with each request.
-	xGoogHeader []string
+	Metadata metadata.MD
}

// NewClient creates a new logging service v2 client.
@@ -136,7 +137,7 @@ func (c *Client) Close() error {
func (c *Client) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
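
For callers the effect of this change is invisible: SetGoogleClientInfo still records client-identification tokens, now as a metadata.MD pair rather than a raw header slice. A hedged usage sketch, with illustrative key/value tokens:

c, err := NewClient(context.Background())
if err != nil {
	// TODO: Handle error.
}
// Appends "my-wrapper/1.0" to the x-goog-api-client header on every request.
c.SetGoogleClientInfo("my-wrapper", "1.0")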

// ProjectPath returns the path for the project resource.
@@ -162,7 +163,7 @@ func LogPath(project, log string) string {
// Log entries written shortly before the delete operation might not be
// deleted.
func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
@@ -172,9 +173,15 @@ func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest,
	return err
}

-// WriteLogEntries writes log entries to Stackdriver Logging.
+// WriteLogEntries ## Log entry resources
+//
+// Writes log entries to Stackdriver Logging. This API method is the
+// only way to send log entries to Stackdriver Logging. This method
+// is used, directly or indirectly, by the Stackdriver Logging agent
+// (fluentd) and all logging libraries configured to use Stackdriver
+// Logging.
func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...)
	var resp *loggingpb.WriteLogEntriesResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -192,7 +199,7 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt
// Stackdriver Logging. For ways to export log entries, see
// Exporting Logs (at /logging/docs/export).
func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)
	it := &LogEntryIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) {
@@ -228,7 +235,7 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri
// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver
// Logging.
func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
	it := &MonitoredResourceDescriptorIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
@@ -264,7 +271,7 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg
// ListLogs lists the logs in projects, organizations, folders, or billing accounts.
// Only logs that have entries are listed.
func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...)
	it := &StringIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {

15
vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
generated
vendored

@@ -29,6 +29,7 @@ import (
	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
)

// MetricsCallOptions contains the retry settings for each method of MetricsClient.
@@ -84,7 +85,7 @@ type MetricsClient struct {
	CallOptions *MetricsCallOptions

	// The metadata to be sent with each request.
-	xGoogHeader []string
+	Metadata metadata.MD
}

// NewMetricsClient creates a new metrics service v2 client.
@@ -122,7 +123,7 @@ func (c *MetricsClient) Close() error {
func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// MetricsProjectPath returns the path for the project resource.
@@ -145,7 +146,7 @@ func MetricsMetricPath(project, metric string) string {

// ListLogMetrics lists logs-based metrics.
func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...)
	it := &LogMetricIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) {
@@ -180,7 +181,7 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL

// GetLogMetric gets a logs-based metric.
func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...)
	var resp *loggingpb.LogMetric
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -196,7 +197,7 @@ func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogM

// CreateLogMetric creates a logs-based metric.
func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...)
	var resp *loggingpb.LogMetric
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -212,7 +213,7 @@ func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.Crea

// UpdateLogMetric creates or updates a logs-based metric.
func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...)
	var resp *loggingpb.LogMetric
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -228,7 +229,7 @@ func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.Upda

// DeleteLogMetric deletes a logs-based metric.
func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
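
Each method above copies its per-method defaults with a full slice expression, s[0:len(s):len(s)], before appending the caller's options. Capping capacity at the current length forces append to allocate a fresh backing array, so per-call options can never overwrite the shared CallOptions defaults. A small standalone sketch of the difference (slice contents are illustrative):

defaults := make([]string, 2, 4)
defaults[0], defaults[1] = "retry", "timeout"

unsafe := append(defaults, "per-call")                              // may write into defaults' spare capacity
safe := append(defaults[0:len(defaults):len(defaults)], "per-call") // always copies
_, _ = unsafe, safe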

416
vendor/cloud.google.com/go/logging/apiv2/mock_test.go
generated
vendored

@@ -20,6 +20,7 @@ import (
	emptypb "github.com/golang/protobuf/ptypes/empty"
	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+	field_maskpb "google.golang.org/genproto/protobuf/field_mask"
)

import (
@@ -197,6 +198,66 @@ func (s *mockConfigServer) DeleteSink(ctx context.Context, req *loggingpb.Delete
	return s.resps[0].(*emptypb.Empty), nil
}

func (s *mockConfigServer) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest) (*loggingpb.ListExclusionsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*loggingpb.ListExclusionsResponse), nil
}

func (s *mockConfigServer) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest) (*loggingpb.LogExclusion, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*loggingpb.LogExclusion), nil
}

func (s *mockConfigServer) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest) (*loggingpb.LogExclusion, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*loggingpb.LogExclusion), nil
}

func (s *mockConfigServer) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest) (*loggingpb.LogExclusion, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*loggingpb.LogExclusion), nil
}

func (s *mockConfigServer) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest) (*emptypb.Empty, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*emptypb.Empty), nil
}
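
These handlers all follow one template: validate the x-goog-api-client header, record the request, then replay a canned error or response. A hedged sketch of how such a mock is typically served to the client under test; the wiring shown here is an assumption, since the real harness setup (and the clientOpt it produces) lives outside this hunk:

lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
	// TODO: Handle error.
}
serv := grpc.NewServer()
loggingpb.RegisterConfigServiceV2Server(serv, &mockConfigServer{})
go serv.Serve(lis)
// clientOpt then points NewConfigClient at the in-process server.
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
	// TODO: Handle error.
}
clientOpt := option.WithGRPCConn(conn)
_ = clientOpt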

type mockMetricsServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added

@@ -956,6 +1017,337 @@ func TestConfigServiceV2DeleteSinkError(t *testing.T) {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
}
func TestConfigServiceV2ListExclusions(t *testing.T) {
	var nextPageToken string = ""
	var exclusionsElement *loggingpb.LogExclusion = &loggingpb.LogExclusion{}
	var exclusions = []*loggingpb.LogExclusion{exclusionsElement}
	var expectedResponse = &loggingpb.ListExclusionsResponse{
		NextPageToken: nextPageToken,
		Exclusions:    exclusions,
	}

	mockConfig.err = nil
	mockConfig.reqs = nil

	mockConfig.resps = append(mockConfig.resps[:0], expectedResponse)

	var formattedParent string = ConfigProjectPath("[PROJECT]")
	var request = &loggingpb.ListExclusionsRequest{
		Parent: formattedParent,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListExclusions(context.Background(), request).Next()

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	want := (interface{})(expectedResponse.Exclusions[0])
	got := (interface{})(resp)
	var ok bool

	switch want := (want).(type) {
	case proto.Message:
		ok = proto.Equal(want, got.(proto.Message))
	default:
		ok = want == got
	}
	if !ok {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
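
The type switch above exists because a response may or may not be a proto message; when it is, proto.Equal compares it structurally instead of by pointer. A two-line illustration in the same file context:

a := &loggingpb.LogExclusion{Name: "n"}
b := &loggingpb.LogExclusion{Name: "n"}
fmt.Println(a == b, proto.Equal(a, b)) // prints "false true"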

func TestConfigServiceV2ListExclusionsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockConfig.err = gstatus.Error(errCode, "test error")

	var formattedParent string = ConfigProjectPath("[PROJECT]")
	var request = &loggingpb.ListExclusionsRequest{
		Parent: formattedParent,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListExclusions(context.Background(), request).Next()

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestConfigServiceV2GetExclusion(t *testing.T) {
	var name2 string = "name2-1052831874"
	var description string = "description-1724546052"
	var filter string = "filter-1274492040"
	var disabled bool = true
	var expectedResponse = &loggingpb.LogExclusion{
		Name:        name2,
		Description: description,
		Filter:      filter,
		Disabled:    disabled,
	}

	mockConfig.err = nil
	mockConfig.reqs = nil

	mockConfig.resps = append(mockConfig.resps[:0], expectedResponse)

	var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]")
	var request = &loggingpb.GetExclusionRequest{
		Name: formattedName,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.GetExclusion(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

func TestConfigServiceV2GetExclusionError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockConfig.err = gstatus.Error(errCode, "test error")

	var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]")
	var request = &loggingpb.GetExclusionRequest{
		Name: formattedName,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.GetExclusion(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestConfigServiceV2CreateExclusion(t *testing.T) {
	var name string = "name3373707"
	var description string = "description-1724546052"
	var filter string = "filter-1274492040"
	var disabled bool = true
	var expectedResponse = &loggingpb.LogExclusion{
		Name:        name,
		Description: description,
		Filter:      filter,
		Disabled:    disabled,
	}

	mockConfig.err = nil
	mockConfig.reqs = nil

	mockConfig.resps = append(mockConfig.resps[:0], expectedResponse)

	var formattedParent string = ConfigProjectPath("[PROJECT]")
	var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{}
	var request = &loggingpb.CreateExclusionRequest{
		Parent:    formattedParent,
		Exclusion: exclusion,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.CreateExclusion(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

func TestConfigServiceV2CreateExclusionError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockConfig.err = gstatus.Error(errCode, "test error")

	var formattedParent string = ConfigProjectPath("[PROJECT]")
	var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{}
	var request = &loggingpb.CreateExclusionRequest{
		Parent:    formattedParent,
		Exclusion: exclusion,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.CreateExclusion(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestConfigServiceV2UpdateExclusion(t *testing.T) {
	var name2 string = "name2-1052831874"
	var description string = "description-1724546052"
	var filter string = "filter-1274492040"
	var disabled bool = true
	var expectedResponse = &loggingpb.LogExclusion{
		Name:        name2,
		Description: description,
		Filter:      filter,
		Disabled:    disabled,
	}

	mockConfig.err = nil
	mockConfig.reqs = nil

	mockConfig.resps = append(mockConfig.resps[:0], expectedResponse)

	var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]")
	var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{}
	var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{}
	var request = &loggingpb.UpdateExclusionRequest{
		Name:       formattedName,
		Exclusion:  exclusion,
		UpdateMask: updateMask,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.UpdateExclusion(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

func TestConfigServiceV2UpdateExclusionError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockConfig.err = gstatus.Error(errCode, "test error")

	var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]")
	var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{}
	var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{}
	var request = &loggingpb.UpdateExclusionRequest{
		Name:       formattedName,
		Exclusion:  exclusion,
		UpdateMask: updateMask,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.UpdateExclusion(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestConfigServiceV2DeleteExclusion(t *testing.T) {
	var expectedResponse *emptypb.Empty = &emptypb.Empty{}

	mockConfig.err = nil
	mockConfig.reqs = nil

	mockConfig.resps = append(mockConfig.resps[:0], expectedResponse)

	var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]")
	var request = &loggingpb.DeleteExclusionRequest{
		Name: formattedName,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	err = c.DeleteExclusion(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

}

func TestConfigServiceV2DeleteExclusionError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockConfig.err = gstatus.Error(errCode, "test error")

	var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]")
	var request = &loggingpb.DeleteExclusionRequest{
		Name: formattedName,
	}

	c, err := NewConfigClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	err = c.DeleteExclusion(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
}
func TestMetricsServiceV2ListLogMetrics(t *testing.T) {
	var nextPageToken string = ""
	var metricsElement *loggingpb.LogMetric = &loggingpb.LogMetric{}
@@ -1032,10 +1424,12 @@ func TestMetricsServiceV2GetLogMetric(t *testing.T) {
	var name string = "name3373707"
	var description string = "description-1724546052"
	var filter string = "filter-1274492040"
+	var valueExtractor string = "valueExtractor2047672534"
	var expectedResponse = &loggingpb.LogMetric{
-		Name:        name,
-		Description: description,
-		Filter:      filter,
+		Name:           name,
+		Description:    description,
+		Filter:         filter,
+		ValueExtractor: valueExtractor,
	}

	mockMetrics.err = nil
@@ -1095,10 +1489,12 @@ func TestMetricsServiceV2CreateLogMetric(t *testing.T) {
	var name string = "name3373707"
	var description string = "description-1724546052"
	var filter string = "filter-1274492040"
+	var valueExtractor string = "valueExtractor2047672534"
	var expectedResponse = &loggingpb.LogMetric{
-		Name:        name,
-		Description: description,
-		Filter:      filter,
+		Name:           name,
+		Description:    description,
+		Filter:         filter,
+		ValueExtractor: valueExtractor,
	}

	mockMetrics.err = nil
@@ -1162,10 +1558,12 @@ func TestMetricsServiceV2UpdateLogMetric(t *testing.T) {
	var name string = "name3373707"
	var description string = "description-1724546052"
	var filter string = "filter-1274492040"
+	var valueExtractor string = "valueExtractor2047672534"
	var expectedResponse = &loggingpb.LogMetric{
-		Name:        name,
-		Description: description,
-		Filter:      filter,
+		Name:           name,
+		Description:    description,
+		Filter:         filter,
+		ValueExtractor: valueExtractor,
	}

	mockMetrics.err = nil

18
vendor/cloud.google.com/go/longrunning/autogen/doc.go
generated
vendored

@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

-// Package longrunning is an experimental, auto-generated package for the
+// Package longrunning is an auto-generated package for the
// Google Long Running Operations API.
//
+// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
+//
//
// Use the client at cloud.google.com/go/longrunning in preference to this.
package longrunning // import "cloud.google.com/go/longrunning/autogen"
@@ -26,11 +28,15 @@ import (
	"google.golang.org/grpc/metadata"
)

-func insertXGoog(ctx context.Context, val []string) context.Context {
-	md, _ := metadata.FromOutgoingContext(ctx)
-	md = md.Copy()
-	md["x-goog-api-client"] = val
-	return metadata.NewOutgoingContext(ctx, md)
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.

13
vendor/cloud.google.com/go/longrunning/autogen/operations_client.go
generated
vendored

@@ -29,6 +29,7 @@ import (
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
)

// OperationsCallOptions contains the retry settings for each method of OperationsClient.
@@ -81,7 +82,7 @@ type OperationsClient struct {
	CallOptions *OperationsCallOptions

	// The metadata to be sent with each request.
-	xGoogHeader []string
+	Metadata metadata.MD
}

// NewOperationsClient creates a new operations client.
@@ -127,14 +128,14 @@ func (c *OperationsClient) Close() error {
func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// GetOperation gets the latest state of a long-running operation. Clients can use this
// method to poll the operation result at intervals as recommended by the API
// service.
func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -154,7 +155,7 @@ func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.
// NOTE: the name binding below allows API services to override the binding
// to use different resource name schemes, such as users/*/operations.
func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
	it := &OperationIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
@@ -198,7 +199,7 @@ func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningp
// an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to Code.CANCELLED.
func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
@@ -213,7 +214,7 @@ func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunning
// operation. If the server doesn't support this method, it returns
// google.rpc.Code.UNIMPLEMENTED.
func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeleteOperation[0:len(c.CallOptions.DeleteOperation):len(c.CallOptions.DeleteOperation)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
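
A hedged sketch of the polling pattern the GetOperation comment above describes, in the same file context; the operation name is illustrative:

op, err := c.GetOperation(ctx, &longrunningpb.GetOperationRequest{
	Name: "operations/some-op", // illustrative operation name
})
if err != nil {
	// TODO: Handle error.
}
if op.Done {
	// op now carries either the response or an error status; otherwise,
	// callers typically sleep and poll again at the API-recommended interval.
}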

18
vendor/cloud.google.com/go/monitoring/apiv3/doc.go
generated
vendored

@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

-// Package monitoring is an experimental, auto-generated package for the
+// Package monitoring is an auto-generated package for the
// Stackdriver Monitoring API.
//
+// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
+//
// Manages your Stackdriver Monitoring data and configurations. Most projects
// must be associated with a Stackdriver account, with a few exceptions as
// noted on the individual method pages.
@@ -27,11 +29,15 @@ import (
	"google.golang.org/grpc/metadata"
)

-func insertXGoog(ctx context.Context, val []string) context.Context {
-	md, _ := metadata.FromOutgoingContext(ctx)
-	md = md.Copy()
-	md["x-goog-api-client"] = val
-	return metadata.NewOutgoingContext(ctx, md)
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.

17
vendor/cloud.google.com/go/monitoring/apiv3/group_client.go
generated
vendored

@@ -30,6 +30,7 @@ import (
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
)

// GroupCallOptions contains the retry settings for each method of GroupClient.
@@ -86,7 +87,7 @@ type GroupClient struct {
	CallOptions *GroupCallOptions

	// The metadata to be sent with each request.
-	xGoogHeader []string
+	Metadata metadata.MD
}

// NewGroupClient creates a new group service client.
@@ -135,7 +136,7 @@ func (c *GroupClient) Close() error {
func (c *GroupClient) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// GroupProjectPath returns the path for the project resource.
@@ -158,7 +159,7 @@ func GroupGroupPath(project, group string) string {

// ListGroups lists the existing groups.
func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...)
	it := &GroupIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) {
@@ -193,7 +194,7 @@ func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGrou

// GetGroup gets a single group.
func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
	var resp *monitoringpb.Group
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -209,7 +210,7 @@ func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRe

// CreateGroup creates a new group.
func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...)
	var resp *monitoringpb.Group
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -226,7 +227,7 @@ func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateG
// UpdateGroup updates an existing group.
// You can change any group attributes except name.
func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
	var resp *monitoringpb.Group
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -242,7 +243,7 @@ func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateG

// DeleteGroup deletes an existing group.
func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
@@ -254,7 +255,7 @@ func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteG

// ListGroupMembers lists the monitored resources that are members of a group.
func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...)
	it := &MonitoredResourceIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) {

21
vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go
generated
vendored

@@ -31,6 +31,7 @@ import (
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
)

// MetricCallOptions contains the retry settings for each method of MetricClient.
@@ -91,7 +92,7 @@ type MetricClient struct {
	CallOptions *MetricCallOptions

	// The metadata to be sent with each request.
-	xGoogHeader []string
+	Metadata metadata.MD
}

// NewMetricClient creates a new metric service client.
@@ -130,7 +131,7 @@ func (c *MetricClient) Close() error {
func (c *MetricClient) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// MetricProjectPath returns the path for the project resource.
@@ -163,7 +164,7 @@ func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor

// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
	it := &MonitoredResourceDescriptorIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
@@ -198,7 +199,7 @@ func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req

// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account.
func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...)
	var resp *monitoredrespb.MonitoredResourceDescriptor
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -214,7 +215,7 @@ func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *

// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account.
func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...)
	it := &MetricDescriptorIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) {
@@ -249,7 +250,7 @@ func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitorin

// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account.
func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...)
	var resp *metricpb.MetricDescriptor
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -267,7 +268,7 @@ func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringp
// User-created metric descriptors define
// custom metrics (at /monitoring/custom-metrics).
func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...)
	var resp *metricpb.MetricDescriptor
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -284,7 +285,7 @@ func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitori
// DeleteMetricDescriptor deletes a metric descriptor. Only user-created
// custom metrics (at /monitoring/custom-metrics) can be deleted.
func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
@@ -296,7 +297,7 @@ func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitori

// ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account.
func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...)
	it := &TimeSeriesIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) {
@@ -334,7 +335,7 @@ func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.Lis
// If any time series could not be written, a corresponding failure message is
// included in the error response.
func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
	opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error

29
vendor/cloud.google.com/go/profiler/busybench/busybench.go
generated
vendored

@@ -21,16 +21,20 @@ import (
	"flag"
	"log"
	"math/rand"
+	"sync"
	"time"
)

-var service = flag.String("service", "", "service name")
+var (
+	service        = flag.String("service", "", "service name")
+	mutexProfiling = flag.Bool("mutex_profiling", false, "enable mutex profiling")
+)

const duration = time.Minute * 10

// busywork continuously generates 1MiB of random data and compresses it
// throwing away the result.
-func busywork() {
+func busywork(mu *sync.Mutex) {
	ticker := time.NewTicker(duration)
	defer ticker.Stop()
	for {
@@ -38,7 +42,9 @@ func busywork() {
		case <-ticker.C:
			return
		default:
+			mu.Lock()
			busyworkOnce()
+			mu.Unlock()
		}
	}
}
@@ -68,12 +74,27 @@ func main() {

	if *service == "" {
		log.Print("Service name must be configured using --service flag.")
-	} else if err := profiler.Start(profiler.Config{Service: *service, DebugLogging: true}); err != nil {
+	} else if err := profiler.Start(
+		profiler.Config{
+			Service:        *service,
+			MutexProfiling: *mutexProfiling,
+			DebugLogging:   true,
+		}); err != nil {
		log.Printf("Failed to start the profiler: %v", err)
	} else {
-		busywork()
+		mu := new(sync.Mutex)
+		var wg sync.WaitGroup
+		wg.Add(5)
+		for i := 0; i < 5; i++ {
+			go func() {
+				defer wg.Done()
+				busywork(mu)
+			}()
+		}
+		wg.Wait()
	}

	log.Printf("busybench finished profiling.")
	// Do not exit, since the pod in the GKE test is set to always restart.
	select {}
}

5
vendor/cloud.google.com/go/profiler/integration-test.sh
generated
vendored

@@ -29,10 +29,7 @@ mkdir -p $GOCLOUD_HOME

# Move code into $GOPATH and get dependencies
cp -R ./* $GOCLOUD_HOME
-cd $GOCLOUD_HOME
-go get -v ./...
-
-cd internal/kokoro
+cd $GOCLOUD_HOME/internal/kokoro
# Don't print out encryption keys, etc
set +x
key=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_key")

54
vendor/cloud.google.com/go/profiler/integration_test.go
generated
vendored

@@ -86,18 +86,18 @@ mkdir -p $GOCLOUD_HOME
# Install agent
git clone https://code.googlesource.com/gocloud $GOCLOUD_HOME

-cd $GOCLOUD_HOME
+cd $GOCLOUD_HOME/profiler/busybench
git reset --hard {{.Commit}}
-go get -v ./...
+go get -v

# Run benchmark with agent
-go run profiler/busybench/busybench.go --service="{{.Service}}"
+go run busybench.go --service="{{.Service}}" --mutex_profiling="{{.MutexProfiling}}"
`

const dockerfileFmt = `FROM golang
RUN git clone https://code.googlesource.com/gocloud /go/src/cloud.google.com/go \
-	&& cd /go/src/cloud.google.com/go && git reset --hard %s \
-	&& go get -v cloud.google.com/go/... && go install -v cloud.google.com/go/profiler/busybench
+	&& cd /go/src/cloud.google.com/go/profiler/busybench && git reset --hard %s \
+	&& go get -v && go install -v
CMD ["busybench", "--service", "%s"]
`

@@ -156,22 +156,25 @@ func validateProfileData(rawData []byte, wantFunctionName string) error {
}

type instanceConfig struct {
-	name      string
-	service   string
-	goVersion string
+	name           string
+	service        string
+	goVersion      string
+	mutexProfiling bool
}

func newInstanceConfigs() []instanceConfig {
	return []instanceConfig{
		{
-			name:      fmt.Sprintf("profiler-test-go19-%d", runID),
-			service:   fmt.Sprintf("profiler-test-go19-%d-gce", runID),
-			goVersion: "1.9",
+			name:           fmt.Sprintf("profiler-test-go19-%d", runID),
+			service:        fmt.Sprintf("profiler-test-go19-%d-gce", runID),
+			goVersion:      "1.9",
+			mutexProfiling: true,
		},
		{
-			name:      fmt.Sprintf("profiler-test-go18-%d", runID),
-			service:   fmt.Sprintf("profiler-test-go18-%d-gce", runID),
-			goVersion: "1.8",
+			name:           fmt.Sprintf("profiler-test-go18-%d", runID),
+			service:        fmt.Sprintf("profiler-test-go18-%d-gce", runID),
+			goVersion:      "1.8",
+			mutexProfiling: true,
		},
		{
			name: fmt.Sprintf("profiler-test-go17-%d", runID),
@@ -208,13 +211,15 @@ func renderStartupScript(template *template.Template, inst instanceConfig) (stri
	var buf bytes.Buffer
	err := template.Execute(&buf,
		struct {
-			Service   string
-			GoVersion string
-			Commit    string
+			Service        string
+			GoVersion      string
+			Commit         string
+			MutexProfiling bool
		}{
-			Service:   inst.service,
-			GoVersion: inst.goVersion,
-			Commit:    *commit,
+			Service:        inst.service,
+			GoVersion:      inst.goVersion,
+			Commit:         *commit,
+			MutexProfiling: inst.mutexProfiling,
		})
	if err != nil {
		return "", fmt.Errorf("failed to render startup script for %s: %v", inst.name, err)
|
||||
|
@ -339,7 +344,12 @@ func (tr *testRunner) runTestOnGCE(ctx context.Context, t *testing.T, inst insta
|
|||
timeNow := time.Now()
|
||||
endTime := timeNow.Format(time.RFC3339)
|
||||
startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
for _, pType := range []string{"CPU", "HEAP"} {
|
||||
|
||||
profileTypes := []string{"CPU", "HEAP", "THREADS"}
|
||||
if inst.mutexProfiling {
|
||||
profileTypes = append(profileTypes, "CONTENTION")
|
||||
}
|
||||
for _, pType := range profileTypes {
|
||||
if err := tr.queryAndCheckProfile(inst.service, startTime, endTime, pType, projectID); err != nil {
|
||||
t.Errorf("queryAndCheckProfile(%s, %s, %s, %s) got error: %v", inst.service, startTime, endTime, pType, err)
|
||||
}
|
||||
|
@ -586,7 +596,7 @@ func (tr *testRunner) runTestOnGKE(ctx context.Context, t *testing.T, cfg cluste
|
|||
timeNow := time.Now()
|
||||
endTime := timeNow.Format(time.RFC3339)
|
||||
startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
for _, pType := range []string{"CPU", "HEAP"} {
|
||||
for _, pType := range []string{"CPU", "HEAP", "THREADS"} {
|
||||
if err := tr.queryAndCheckProfile(cfg.service, startTime, endTime, pType, projectID); err != nil {
|
||||
t.Errorf("queryAndCheckProfile(%s, %s, %s, %s) got error: %v", cfg.service, startTime, endTime, pType, err)
|
||||
}
|
||||
|
|
|
@@ -1,4 +1,4 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
+// Copyright 2017 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

@@ -12,16 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package bigquery
+// +build go1.8
 
-func defaultGCS() *GCSReference {
-	return &GCSReference{
-		uris: []string{"uri"},
-	}
-}
+package profiler
 
-var defaultQuery = &QueryConfig{
-	Q:                "query string",
-	DefaultProjectID: "def-project-id",
-	DefaultDatasetID: "def-dataset-id",
+import "runtime"
+
+func enableMutexProfiling() bool {
+	// One percent of mutex contention events are profiled.
+	runtime.SetMutexProfileFraction(100)
+	return true
+}
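The build-tagged helper above relies on runtime.SetMutexProfileFraction(n), which samples roughly 1 of every n contention events. As a standalone illustration (not part of the vendored code), the same mechanism can be exercised with only the standard library:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func main() {
	// Sample about 1 in 100 contention events, matching the fraction
	// used by enableMutexProfiling above.
	runtime.SetMutexProfileFraction(100)

	var mu sync.Mutex
	for i := 0; i < 4; i++ {
		go func() {
			for {
				mu.Lock()
				time.Sleep(time.Millisecond) // hold the lock to create contention
				mu.Unlock()
			}
		}()
	}
	time.Sleep(time.Second)

	// "mutex" is a standard runtime/pprof profile name.
	pprof.Lookup("mutex").WriteTo(os.Stdout, 0)
}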
21	vendor/cloud.google.com/go/profiler/mutex_go17.go (generated, vendored, new file)

@@ -0,0 +1,21 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package profiler
+
+func enableMutexProfiling() bool {
+	return false
+}
120	vendor/cloud.google.com/go/profiler/profiler.go (generated, vendored)
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package profiler is a client for the Google Cloud Profiler service.
+// Package profiler is a client for the Stackdriver Profiler service.
 //
 // This package is still experimental and subject to change.
 //

@@ -20,17 +20,20 @@
 //
 //   import "cloud.google.com/go/profiler"
 //   ...
-//   err := profiler.Start(profiler.Config{Service: "my-service"})
-//   if err != nil {
+//   if err := profiler.Start(profiler.Config{Service: "my-service"}); err != nil {
 //       // TODO: Handle error.
 //   }
 //
-// Calling Start will start a goroutine to collect profiles and
-// upload to Cloud Profiler server, at the rhythm specified by
-// the server.
+// Calling Start will start a goroutine to collect profiles and upload to
+// the profiler server, at the rhythm specified by the server.
 //
-// The caller must provide the service string in the config, and
-// may provide other information as well. See Config for details.
+// The caller must provide the service string in the config, and may provide
+// other information as well. See Config for details.
+//
+// Profiler has CPU, heap and goroutine profiling enabled by default. Mutex
+// profiling can be enabled in the config. Note that goroutine and mutex
+// profiles are shown as "threads" and "contention" profiles in the profiler
+// UI.
 package profiler
 
 import (
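Taken together, the revised doc comment and the new MutexProfiling option suggest the following start-up pattern. A minimal sketch, assuming a placeholder service name and default credentials; only the Config fields visible in this diff are used:

package main

import (
	"log"

	"cloud.google.com/go/profiler"
)

func main() {
	// MutexProfiling requires Go 1.8 or later; on older runtimes Start
	// returns an error (see the start() change further down in this diff).
	if err := profiler.Start(profiler.Config{
		Service:        "my-service", // placeholder name
		MutexProfiling: true,
		DebugLogging:   true,
	}); err != nil {
		log.Fatalf("failed to start the profiler: %v", err)
	}
	select {} // keep serving; profiles upload from a background goroutine
}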
@@ -39,6 +42,7 @@ import (
 	"fmt"
 	"log"
 	"os"
+	"runtime"
 	"runtime/pprof"
 	"sync"
 	"time"

@@ -47,6 +51,7 @@ import (
 	"cloud.google.com/go/internal/version"
 	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
 	"github.com/google/pprof/profile"
 	gax "github.com/googleapis/gax-go"
 	"golang.org/x/net/context"
 	"google.golang.org/api/option"

@@ -60,8 +65,9 @@ import (
 )
 
 var (
-	config    Config
-	startOnce sync.Once
+	config       Config
+	startOnce    sync.Once
+	mutexEnabled bool
 	// The functions below are stubbed to be overrideable for testing.
 	getProjectID    = gcemd.ProjectID
 	getInstanceName = gcemd.InstanceName
@@ -93,7 +99,7 @@ const (
 type Config struct {
 	// Service (or deprecated Target) must be provided to start the profiler.
 	// It specifies the name of the service under which the profiled data
-	// will be recorded and exposed at the Cloud Profiler UI for the project.
+	// will be recorded and exposed at the Profiler UI for the project.
 	// You can specify an arbitrary string, but see Deployment.target at
 	// https://github.com/googleapis/googleapis/blob/master/google/devtools/cloudprofiler/v2/profiler.proto
 	// for restrictions.

@@ -103,7 +109,7 @@ type Config struct {
 	Service string
 
 	// ServiceVersion is an optional field specifying the version of the
-	// service. It can be an arbitrary string. Cloud Profiler profiles
+	// service. It can be an arbitrary string. Profiler profiles
 	// once per minute for each version of each service in each zone.
 	// ServiceVersion defaults to an empty string.
 	ServiceVersion string

@@ -112,6 +118,11 @@ type Config struct {
 	// defaults to false.
 	DebugLogging bool
 
+	// MutexProfiling enables mutex profiling. It defaults to false.
+	// Note that mutex profiling is not supported by Go versions older
+	// than Go 1.8.
+	MutexProfiling bool
+
 	// ProjectID is the Cloud Console project ID to use instead of
 	// the one read from the VM metadata server.
 	//

@@ -151,6 +162,11 @@ func start(cfg Config, options ...option.ClientOption) error {
 		debugLog("failed to initialize config: %v", err)
 		return err
 	}
+	if config.MutexProfiling {
+		if mutexEnabled = enableMutexProfiling(); !mutexEnabled {
+			return fmt.Errorf("mutex profiling is not supported by %s, requires Go 1.8 or later", runtime.Version())
+		}
+	}
 
 	ctx := context.Background()
@@ -177,16 +193,17 @@ func debugLog(format string, e ...interface{}) {
 	}
 }
 
-// agent polls Cloud Profiler server for instructions on behalf of
-// a task, and collects and uploads profiles as requested.
+// agent polls the profiler server for instructions on behalf of a task,
+// and collects and uploads profiles as requested.
 type agent struct {
 	client        pb.ProfilerServiceClient
 	deployment    *pb.Deployment
 	profileLabels map[string]string
+	profileTypes  []pb.ProfileType
 }
 
 // abortedBackoffDuration retrieves the retry duration from gRPC trailing
-// metadata, which is set by Cloud Profiler server.
+// metadata, which is set by the profiler server.
 func abortedBackoffDuration(md grpcmd.MD) (time.Duration, error) {
 	elem := md[retryInfoMetadata]
 	if len(elem) <= 0 {

@@ -223,14 +240,14 @@ func (r *retryer) Retry(err error) (time.Duration, bool) {
 	return r.backoff.Pause(), true
 }
 
-// createProfile talks to Cloud Profiler server to create profile. In
+// createProfile talks to the profiler server to create profile. In
 // case of error, the goroutine will sleep and retry. Sleep duration may
 // be specified by the server. Otherwise it will be an exponentially
 // increasing value, bounded by maxBackoff.
 func (a *agent) createProfile(ctx context.Context) *pb.Profile {
 	req := pb.CreateProfileRequest{
 		Deployment:  a.deployment,
-		ProfileType: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP},
+		ProfileType: a.profileTypes,
 	}
 
 	var p *pb.Profile

@@ -277,6 +294,21 @@ func (a *agent) profileAndUpload(ctx context.Context, p *pb.Profile) {
 			debugLog("failed to write heap profile: %v", err)
 			return
 		}
+	case pb.ProfileType_THREADS:
+		if err := pprof.Lookup("goroutine").WriteTo(&prof, 0); err != nil {
+			debugLog("failed to create goroutine profile: %v", err)
+			return
+		}
+	case pb.ProfileType_CONTENTION:
+		duration, err := ptypes.Duration(p.Duration)
+		if err != nil {
+			debugLog("failed to get profile duration: %v", err)
+			return
+		}
+		if err := deltaMutexProfile(ctx, duration, &prof); err != nil {
+			debugLog("failed to create mutex profile: %v", err)
+			return
+		}
 	default:
 		debugLog("unexpected profile type: %v", pt)
 		return

@@ -285,7 +317,7 @@ func (a *agent) profileAndUpload(ctx context.Context, p *pb.Profile) {
 	// Starting Go 1.9 the profiles are symbolized by runtime/pprof.
 	// TODO(jianqiaoli): Remove the symbolization code when we decide to
 	// stop supporting Go 1.8.
-	if !shouldAssumeSymbolized {
+	if !shouldAssumeSymbolized && pt != pb.ProfileType_CONTENTION {
 		if err := parseAndSymbolize(&prof); err != nil {
 			debugLog("failed to symbolize profile: %v", err)
 		}
@@ -302,6 +334,50 @@ func (a *agent) profileAndUpload(ctx context.Context, p *pb.Profile) {
 	}
 }
 
+// deltaMutexProfile writes mutex profile changes over a time period specified
+// with 'duration' to 'prof'.
+func deltaMutexProfile(ctx context.Context, duration time.Duration, prof *bytes.Buffer) error {
+	if !mutexEnabled {
+		return errors.New("mutex profiling is not enabled")
+	}
+	p0, err := mutexProfile()
+	if err != nil {
+		return err
+	}
+	sleep(ctx, duration)
+	p, err := mutexProfile()
+	if err != nil {
+		return err
+	}
+
+	// TODO(jianqiaoli): Remove this check when github.com/google/pprof/issues/242
+	// is fixed.
+	if len(p0.Mapping) > 0 {
+		p0.Scale(-1)
+		p, err = profile.Merge([]*profile.Profile{p0, p})
+		if err != nil {
+			return err
+		}
+	}
+
+	// The mutex profile is not symbolized by runtime.pprof until
+	// golang.org/issue/21474 is fixed in go1.10.
+	symbolize(p)
+	return p.Write(prof)
+}
+
+func mutexProfile() (*profile.Profile, error) {
+	p := pprof.Lookup("mutex")
+	if p == nil {
+		return nil, errors.New("mutex profiling is not supported")
+	}
+	var buf bytes.Buffer
+	if err := p.WriteTo(&buf, 0); err != nil {
+		return nil, err
+	}
+	return profile.Parse(&buf)
+}
+
 // withXGoogHeader sets the name and version of the application in
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
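The delta trick above (negate the baseline snapshot with Scale(-1), then Merge) is what keeps each uploaded contention profile scoped to its collection window, since the runtime's mutex profile is cumulative. A compact restatement of that step using the same github.com/google/pprof/profile API; the function name delta is illustrative:

package main

import "github.com/google/pprof/profile"

// delta subtracts the baseline snapshot from the later one by negating the
// baseline's sample values and merging the two profiles, mirroring
// deltaMutexProfile above.
func delta(before, after *profile.Profile) (*profile.Profile, error) {
	before.Scale(-1)
	return profile.Merge([]*profile.Profile{before, after})
}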
@@ -335,10 +411,16 @@ func initializeAgent(c pb.ProfilerServiceClient) *agent {
 		profileLabels[instanceLabel] = config.instance
 	}
 
+	profileTypes := []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}
+	if mutexEnabled {
+		profileTypes = append(profileTypes, pb.ProfileType_CONTENTION)
+	}
+
 	return &agent{
 		client:        c,
 		deployment:    d,
 		profileLabels: profileLabels,
+		profileTypes:  profileTypes,
 	}
 }

@@ -388,7 +470,7 @@ func initializeConfig(cfg Config) error {
 	return nil
 }
 
-// pollProfilerService starts an endless loop to poll Cloud Profiler
+// pollProfilerService starts an endless loop to poll the profiler
 // server for instructions, and collects and uploads profiles as
 // requested.
 func pollProfilerService(ctx context.Context, a *agent) {
3	vendor/cloud.google.com/go/profiler/profiler_example_test.go (generated, vendored)

@@ -19,8 +19,7 @@ import (
 )
 
 func ExampleStart() {
-	err := profiler.Start(profiler.Config{Service: "my-service", ServiceVersion: "v1"})
-	if err != nil {
+	if err := profiler.Start(profiler.Config{Service: "my-service", ServiceVersion: "v1"}); err != nil {
 		//TODO: Handle error.
 	}
 }
123	vendor/cloud.google.com/go/profiler/profiler_test.go (generated, vendored)

@@ -23,7 +23,9 @@ import (
 	"log"
 	"math/rand"
 	"os"
+	"runtime"
+	"strings"
 	"sync"
 	"testing"
 	"time"

@@ -71,6 +73,7 @@ func createTestAgent(psc pb.ProfilerServiceClient) *agent {
 		client:        psc,
 		deployment:    createTestDeployment(),
 		profileLabels: map[string]string{instanceLabel: testInstance},
+		profileTypes:  []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS},
 	}
 }

@@ -92,7 +95,7 @@ func TestCreateProfile(t *testing.T) {
 	p := &pb.Profile{Name: "test_profile"}
 	wantRequest := pb.CreateProfileRequest{
 		Deployment:  a.deployment,
-		ProfileType: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP},
+		ProfileType: a.profileTypes,
 	}
 
 	mpc.EXPECT().CreateProfile(ctx, gomock.Eq(&wantRequest), gomock.Any()).Times(1).Return(p, nil)

@@ -334,13 +337,14 @@ func TestWithXGoogHeader(t *testing.T) {
 }
 
 func TestInitializeAgent(t *testing.T) {
-	oldConfig := config
+	oldConfig, oldMutexEnabled := config, mutexEnabled
 	defer func() {
-		config = oldConfig
+		config, mutexEnabled = oldConfig, oldMutexEnabled
 	}()
 
 	for _, tt := range []struct {
 		config               Config
+		enableMutex          bool
 		wantDeploymentLabels map[string]string
 		wantProfileLabels    map[string]string
 	}{

@@ -364,11 +368,18 @@ func TestInitializeAgent(t *testing.T) {
 			wantDeploymentLabels: map[string]string{},
 			wantProfileLabels:    map[string]string{instanceLabel: testInstance},
 		},
+		{
+			config:               Config{instance: testInstance},
+			enableMutex:          true,
+			wantDeploymentLabels: map[string]string{},
+			wantProfileLabels:    map[string]string{instanceLabel: testInstance},
+		},
 	} {
 		config = tt.config
 		config.ProjectID = testProjectID
 		config.Target = testTarget
+		mutexEnabled = tt.enableMutex
 		a := initializeAgent(nil)
 
 		wantDeployment := &pb.Deployment{

@@ -377,12 +388,21 @@ func TestInitializeAgent(t *testing.T) {
 			Labels: tt.wantDeploymentLabels,
 		}
 		if !testutil.Equal(a.deployment, wantDeployment) {
-			t.Errorf("initializeResources() got deployment: %v, want %v", a.deployment, wantDeployment)
+			t.Errorf("initializeAgent() got deployment: %v, want %v", a.deployment, wantDeployment)
 		}
 
 		if !testutil.Equal(a.profileLabels, tt.wantProfileLabels) {
-			t.Errorf("initializeResources() got profile labels: %v, want %v", a.profileLabels, tt.wantProfileLabels)
+			t.Errorf("initializeAgent() got profile labels: %v, want %v", a.profileLabels, tt.wantProfileLabels)
 		}
+
+		wantProfileTypes := []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}
+		if tt.enableMutex {
+			wantProfileTypes = append(wantProfileTypes, pb.ProfileType_CONTENTION)
+		}
+		if !testutil.Equal(a.profileTypes, wantProfileTypes) {
+			t.Errorf("initializeAgent() got profile types: %v, want %v", a.profileTypes, wantProfileTypes)
+		}
 	}
 }

@@ -653,6 +673,99 @@ func validateProfile(rawData []byte, wantFunctionName string) error {
 	return fmt.Errorf("wanted function name %s not found in the profile", wantFunctionName)
 }
 
+func TestDeltaMutexProfile(t *testing.T) {
+	oldMutexEnabled, oldMaxProcs := mutexEnabled, runtime.GOMAXPROCS(10)
+	defer func() {
+		mutexEnabled = oldMutexEnabled
+		runtime.GOMAXPROCS(oldMaxProcs)
+	}()
+	if mutexEnabled = enableMutexProfiling(); !mutexEnabled {
+		t.Skip("Go too old - mutex profiling not supported.")
+	}
+
+	hog(time.Second, mutexHog)
+	go func() {
+		hog(2*time.Second, backgroundHog)
+	}()
+
+	var prof bytes.Buffer
+	if err := deltaMutexProfile(context.Background(), time.Second, &prof); err != nil {
+		t.Fatalf("deltaMutexProfile() got error: %v", err)
+	}
+	p, err := profile.Parse(&prof)
+	if err != nil {
+		t.Fatalf("profile.Parse() got error: %v", err)
+	}
+
+	if s := sum(p, "mutexHog"); s != 0 {
+		t.Errorf("mutexHog found in the delta mutex profile (sum=%d):\n%s", s, p)
+	}
+	if s := sum(p, "backgroundHog"); s <= 0 {
+		t.Errorf("backgroundHog not in the delta mutex profile (sum=%d):\n%s", s, p)
+	}
+}
+
+// sum returns the sum of all mutex counts from the samples whose
+// stacks include the specified function name.
+func sum(p *profile.Profile, fname string) int64 {
+	locIDs := map[*profile.Location]bool{}
+	for _, loc := range p.Location {
+		for _, l := range loc.Line {
+			if strings.Contains(l.Function.Name, fname) {
+				locIDs[loc] = true
+				break
+			}
+		}
+	}
+	var s int64
+	for _, sample := range p.Sample {
+		for _, loc := range sample.Location {
+			if locIDs[loc] {
+				s += sample.Value[0]
+				break
+			}
+		}
+	}
+	return s
+}
+
+func mutexHog(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) {
+	for time.Since(start) < dt {
+		mu1.Lock()
+		runtime.Gosched()
+		mu2.Lock()
+		mu1.Unlock()
+		mu2.Unlock()
+	}
+}
+
+// backgroundHog is identical to mutexHog. We keep them separate
+// in order to distinguish them with function names in the stack trace.
+func backgroundHog(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) {
+	for time.Since(start) < dt {
+		mu1.Lock()
+		runtime.Gosched()
+		mu2.Lock()
+		mu1.Unlock()
+		mu2.Unlock()
+	}
+}
+
+func hog(dt time.Duration, hogger func(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration)) {
+	start := time.Now()
+	mu1 := new(sync.Mutex)
+	mu2 := new(sync.Mutex)
+	var wg sync.WaitGroup
+	wg.Add(10)
+	for i := 0; i < 10; i++ {
+		go func() {
+			defer wg.Done()
+			hogger(mu1, mu2, start, dt)
+		}()
+	}
+	wg.Wait()
+}
+
 func TestAgentWithServer(t *testing.T) {
 	oldDialGRPC, oldConfig := dialGRPC, config
 	defer func() {
18	vendor/cloud.google.com/go/pubsub/apiv1/doc.go (generated, vendored)

@@ -14,9 +14,11 @@
 
 // AUTO-GENERATED CODE. DO NOT EDIT.
 
-// Package pubsub is an experimental, auto-generated package for the
+// Package pubsub is an auto-generated package for the
 // Google Cloud Pub/Sub API.
 //
+// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes.
+//
 // Provides reliable, many-to-many, asynchronous messaging between
 // applications.
 //

@@ -28,11 +30,15 @@ import (
 	"google.golang.org/grpc/metadata"
 )
 
-func insertXGoog(ctx context.Context, val []string) context.Context {
-	md, _ := metadata.FromOutgoingContext(ctx)
-	md = md.Copy()
-	md["x-goog-api-client"] = val
-	return metadata.NewOutgoingContext(ctx, md)
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
 }
 
 // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
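The point of this change: insertMetadata merges any number of metadata.MD values into the context's outgoing gRPC metadata, appending to existing keys rather than overwriting them as insertXGoog did. A minimal sketch of the same copy-then-append pattern with the real google.golang.org/grpc/metadata API; the header values are placeholders:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("x-goog-api-client", "gl-go/1.9"))

	// Merge another MD the way insertMetadata does: copy, then append.
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for k, v := range metadata.Pairs("x-goog-api-client", "gax/2.0") {
		out[k] = append(out[k], v...)
	}
	ctx = metadata.NewOutgoingContext(ctx, out)

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-goog-api-client"]) // [gl-go/1.9 gax/2.0]
}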
27	vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go (generated, vendored)

@@ -30,6 +30,7 @@ import (
 	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
 )
 
 // PublisherCallOptions contains the retry settings for each method of PublisherClient.

@@ -67,13 +68,13 @@ func defaultPublisherCallOptions() *PublisherCallOptions {
 		{"messaging", "one_plus_delivery"}: {
 			gax.WithRetry(func() gax.Retryer {
 				return gax.OnCodes([]codes.Code{
-					codes.Canceled,
-					codes.Unknown,
-					codes.DeadlineExceeded,
-					codes.ResourceExhausted,
 					codes.Aborted,
+					codes.Canceled,
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.ResourceExhausted,
+					codes.Unavailable,
+					codes.Unknown,
 				}, gax.Backoff{
 					Initial: 100 * time.Millisecond,
 					Max:     60000 * time.Millisecond,
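The retry table above is assembled from gax-go primitives: gax.WithRetry wraps a factory that returns a gax.OnCodes retryer with an exponential gax.Backoff. A self-contained sketch of the same pattern; the code list and the Multiplier value are illustrative, since this diff shows only Initial and Max:

package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go"
	"google.golang.org/grpc/codes"
)

func main() {
	// Build a retrying CallOption the same way the generated client does:
	// retry only on the listed gRPC codes, with exponential backoff.
	opt := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{
			codes.Unavailable,
			codes.DeadlineExceeded,
		}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        60 * time.Second,
			Multiplier: 1.3, // assumed; not shown in this hunk
		})
	})
	fmt.Printf("%T\n", opt)
}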
@@ -105,7 +106,7 @@ type PublisherClient struct {
 	CallOptions *PublisherCallOptions
 
 	// The metadata to be sent with each request.
-	xGoogHeader []string
+	Metadata metadata.MD
 }
 
 // NewPublisherClient creates a new publisher client.

@@ -144,7 +145,7 @@ func (c *PublisherClient) Close() error {
 func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) {
 	kv := append([]string{"gl-go", version.Go()}, keyval...)
 	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
 }
 
 // PublisherProjectPath returns the path for the project resource.

@@ -175,7 +176,7 @@ func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
 
 // CreateTopic creates the given topic with the given name.
 func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...)
 	var resp *pubsubpb.Topic
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -196,7 +197,7 @@ func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic,
 // corrected in V2. See
 // https://cloud.google.com/apis/design/standard_methods#update for details.
 func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...)
 	var resp *pubsubpb.Topic
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -214,7 +215,7 @@ func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateT
 // does not exist. The message payload must not be empty; it must contain
 // either a non-empty data field, or at least one attribute.
 func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...)
 	var resp *pubsubpb.PublishResponse
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -230,7 +231,7 @@ func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequ
 
 // GetTopic gets the configuration of a topic.
 func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...)
 	var resp *pubsubpb.Topic
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -246,7 +247,7 @@ func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRe
 
 // ListTopics lists matching topics.
 func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...)
 	it := &TopicIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) {

@@ -281,7 +282,7 @@ func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopi
 
 // ListTopicSubscriptions lists the name of the subscriptions for this topic.
 func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...)
 	it := &StringIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {

@@ -320,7 +321,7 @@ func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsu
 // configuration or subscriptions. Existing subscriptions to this topic are
 // not deleted, but their topic field is set to _deleted-topic_.
 func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...)
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
39	vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go (generated, vendored)

@@ -30,6 +30,7 @@ import (
 	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
 )
 
 // SubscriberCallOptions contains the retry settings for each method of SubscriberClient.

@@ -77,8 +78,8 @@ func defaultSubscriberCallOptions() *SubscriberCallOptions {
 				return gax.OnCodes([]codes.Code{
-					codes.Canceled,
 					codes.DeadlineExceeded,
-					codes.ResourceExhausted,
+					codes.Internal,
+					codes.ResourceExhausted,
 					codes.Unavailable,
 				}, gax.Backoff{
 					Initial: 100 * time.Millisecond,

@@ -92,8 +93,8 @@ func defaultSubscriberCallOptions() *SubscriberCallOptions {
 				return gax.OnCodes([]codes.Code{
-					codes.Canceled,
 					codes.DeadlineExceeded,
-					codes.ResourceExhausted,
+					codes.Internal,
+					codes.ResourceExhausted,
 					codes.Unavailable,
 				}, gax.Backoff{
 					Initial: 100 * time.Millisecond,

@@ -134,7 +135,7 @@ type SubscriberClient struct {
 	CallOptions *SubscriberCallOptions
 
 	// The metadata to be sent with each request.
-	xGoogHeader []string
+	Metadata metadata.MD
 }
 
 // NewSubscriberClient creates a new subscriber client.

@@ -173,7 +174,7 @@ func (c *SubscriberClient) Close() error {
 func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) {
 	kv := append([]string{"gl-go", version.Go()}, keyval...)
 	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
+	c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
 }
 
 // SubscriberProjectPath returns the path for the project resource.

@@ -233,7 +234,7 @@ func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
 // The generated name is populated in the returned Subscription object.
 // Note that for REST API requests, you must specify a name in the request.
 func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...)
 	var resp *pubsubpb.Subscription
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -249,7 +250,7 @@ func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb
 
 // GetSubscription gets the configuration details of a subscription.
 func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...)
 	var resp *pubsubpb.Subscription
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -270,7 +271,7 @@ func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.Ge
 // corrected in V2. See
 // https://cloud.google.com/apis/design/standard_methods#update for details.
 func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...)
 	var resp *pubsubpb.Subscription
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -286,7 +287,7 @@ func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb
 
 // ListSubscriptions lists matching subscriptions.
 func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...)
 	it := &SubscriptionIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) {

@@ -325,7 +326,7 @@ func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.
 // the same name, but the new one has no association with the old
 // subscription or its topic unless the same topic is specified.
 func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...)
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error

@@ -341,7 +342,7 @@ func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb
 // processing was interrupted. Note that this does not modify the
 // subscription-level ackDeadlineSeconds used for subsequent messages.
 func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...)
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error

@@ -359,7 +360,7 @@ func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.
 // but such a message may be redelivered later. Acknowledging a message more
 // than once will not result in an error.
 func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...)
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error

@@ -374,7 +375,7 @@ func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.Acknow
 // there are too many concurrent pull requests pending for the given
 // subscription.
 func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...)
 	var resp *pubsubpb.PullResponse
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -401,7 +402,7 @@ func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest,
 // (e.g., a server restart). These should also be retried by the client. Flow
 // control can be achieved by configuring the underlying RPC channel.
 func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...)
 	var resp pubsubpb.Subscriber_StreamingPullClient
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -422,7 +423,7 @@ func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOp
 // attributes of a push subscription. Messages will accumulate for delivery
 // continuously through the call regardless of changes to the PushConfig.
 func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...)
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error

@@ -434,7 +435,7 @@ func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.M
 
 // ListSnapshots lists the existing snapshots.
 func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...)
 	it := &SnapshotIterator{}
 	it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) {

@@ -478,7 +479,7 @@ func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.List
 // The generated name is populated in the returned Snapshot object.
 // Note that for REST API requests, you must specify a name in the request.
 func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...)
 	var resp *pubsubpb.Snapshot
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -499,7 +500,7 @@ func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.Cre
 // corrected in V2. See
 // https://cloud.google.com/apis/design/standard_methods#update for details.
 func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...)
 	var resp *pubsubpb.Snapshot
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -518,7 +519,7 @@ func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.Upd
 // created with the same name, but the new one has no association with the old
 // snapshot or its subscription, unless the same subscription is specified.
 func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...)
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error

@@ -531,7 +532,7 @@ func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.Del
 // Seek seeks an existing subscription to a point in time or to a given snapshot,
 // whichever is provided in the request.
 func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) {
-	ctx = insertXGoog(ctx, c.xGoogHeader)
+	ctx = insertMetadata(ctx, c.Metadata)
 	opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...)
 	var resp *pubsubpb.SeekResponse
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
Some files were not shown because too many files have changed in this diff.