forked from TrueCloudLab/rclone

vendor: update all dependencies

This commit is contained in:
  parent 0b6fba34a3
  commit eb87cf6f12

2008 changed files with 352633 additions and 1004750 deletions

72  Gopkg.lock  (generated)
@@ -9,9 +9,9 @@
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata","internal"]
revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c"
version = "v0.7.0"
packages = ["compute/metadata"]
revision = "a5913b3f7deecba45e98ff33cefbac4fd204ddd7"
version = "v0.10.0"

[[projects]]
branch = "master"

@@ -23,13 +23,13 @@
branch = "master"
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "c595cd886c223c6c28fc9ae2727a61b5e4693d85"
revision = "4cc8cc5a2a44f01d31b303a7280e20e00a6eafdb"

[[projects]]
branch = "master"
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
revision = "57572ec625c9aa8bf5c45453efa923bacabc0afe"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
revision = "bd544a64249f735bcf394ab0c4fa8bec9b31f221"

[[projects]]
name = "github.com/billziss-gh/cgofuse"

@@ -52,14 +52,14 @@
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "e7fea39b01aea8d5671f6858f0532f56e8bff3a5"
version = "v1.27.0"
revision = "d3de07a94d22b4a0972deb4b96d790c2c0ce8333"
version = "v1.28.0"

[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","ptypes/any"]
revision = "b50ceb1fa9818fa4d78b016c2d4ae025593a7ce3"
packages = ["proto"]
revision = "0a4f71a498b7c4812f64969510bcb4eca251e33a"

[[projects]]
branch = "master"

@@ -67,12 +67,6 @@
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"

[[projects]]
branch = "master"
name = "github.com/googleapis/gax-go"
packages = ["."]
revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b"

[[projects]]
branch = "master"
name = "github.com/inconshreveable/mousetrap"

@@ -83,7 +77,7 @@
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "5c7b901224c7880b293e0b5486cb6ebf97bfca37"
revision = "769512c448b98e9efa243279a7e281248332aa98"

[[projects]]
name = "github.com/jmespath/go-jmespath"

@@ -119,13 +113,13 @@
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "8e9b10220613abdbc2896808ee6b43e411a4fa6c"
revision = "5068c3506cf003c630c94b92a64e978115394f26"

[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "4163cd39dda1c0dda883a713640bc01e08951c24"
revision = "4ed959e0540971545eddb8c75514973d670cf739"

[[projects]]
branch = "master"

@@ -137,7 +131,7 @@
branch = "master"
name = "github.com/pkg/sftp"
packages = ["."]
revision = "a5f8514e29e90a859e93871b1582e5c81f466f82"
revision = "4f3e725e885c021085d2fb8a9cc26e30ea1a992f"

[[projects]]
name = "github.com/pmezard/go-difflib"

@@ -161,7 +155,7 @@
branch = "master"
name = "github.com/shurcooL/sanitized_anchor_name"
packages = ["."]
revision = "79c90efaf01eddc01945af5bc1797859189b830b"
revision = "541ff5ee47f1dddf6a5281af78307d921524bcb5"

[[projects]]
branch = "master"

@@ -173,19 +167,19 @@
branch = "master"
name = "github.com/spf13/cobra"
packages = [".","doc"]
revision = "90687e7bfc7e1e5cd88eb1f513f32f01dc03dd7c"
revision = "715f41bd7a70b5111f898b71ab484da52ee6266d"

[[projects]]
branch = "master"
name = "github.com/spf13/pflag"
packages = ["."]
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
version = "v1.0"

[[projects]]
branch = "master"
name = "github.com/stretchr/testify"
packages = ["assert","require"]
revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"
revision = "05e8a0eda380579888eb53c394909df027f06991"

[[projects]]
branch = "master"

@@ -197,31 +191,31 @@
branch = "master"
name = "golang.org/x/crypto"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","nacl/secretbox","pbkdf2","poly1305","salsa20/salsa","scrypt","ssh","ssh/agent","ssh/terminal"]
revision = "122d919ec1efcfb58483215da23f815853e24b81"
revision = "6914964337150723782436d56b3f21610a74ce7b"

[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","html","html/atom","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
revision = "c9b681d35165f1995d6f3034e61f8761d4b90c99"
packages = ["context","context/ctxhttp","html","html/atom"]
revision = "ab5485076ff3407ad2d02db054635913f017b0ed"

[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "ad516a297a9f2a74ecc244861b298c94bdd28b9d"
revision = "b53b38ad8a6435bd399ea76d0fa74f23149cca4e"

[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "9ccfe848b9db8435a24c424abbc07a921adf1df5"
revision = "c4489faa6e5ab84c0ef40d6ee878f7a030281f0f"

[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4"
packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm"]
revision = "836efe42bb4aa16aaa17b9c155d8813d336ed720"

[[projects]]
branch = "master"

@@ -233,7 +227,7 @@
branch = "master"
name = "google.golang.org/api"
packages = ["drive/v2","gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
revision = "c858ef4400610cbfd097ffc5f5c6e4a1a51eac86"
revision = "295e4bb0ade057ae2cfb9876ab0b54635dbfcea4"

[[projects]]
name = "google.golang.org/appengine"

@@ -241,23 +235,11 @@
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"

[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "411e09b969b1170a9f0c467558eb4c4c110d9c77"

[[projects]]
name = "google.golang.org/grpc"
packages = [".","codes","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
revision = "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
version = "v1.3.0"

[[projects]]
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b"
revision = "25c4ec802a7d637f88d584ab26798e94ad14c13b"

[solve-meta]
analyzer-name = "dep"

Gopkg.toml

@@ -49,8 +49,6 @@
## what source location any dependent projects specify.
# source = "https://github.com/myfork/package.git"

[[constraint]]
branch = "master"
name = "bazil.org/fuse"

@@ -107,10 +105,6 @@
branch = "master"
name = "github.com/stretchr/testify"

[[constraint]]
branch = "master"
name = "github.com/tsenart/tb"

[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
4  vendor/cloud.google.com/go/.travis.yml  (generated, vendored)

@@ -7,9 +7,9 @@ go:
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in key.json.enc -out key.json -d
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
  go test -race -v cloud.google.com/go/...
  ./run-tests.sh $TRAVIS_COMMIT
env:
  matrix:
  # The GCLOUD_TESTS_API_KEY environment variable.
6  vendor/cloud.google.com/go/CONTRIBUTING.md  (generated, vendored)

@@ -4,6 +4,12 @@
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
1. You will need to ensure that your `GOBIN` directory (by default
   `$GOPATH/bin`) is in your `PATH` so that git can find the command.
1. If you would like, you may want to set up aliases for git-codereview,
   such that `git codereview change` becomes `git change`. See the
   [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
1. Should you run into issues with the git-codereview tool, please note
   that all error messages will assume that you have set up these
   aliases.
1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote git
   origin is https://code.googlesource.com/gocloud:
290  vendor/cloud.google.com/go/README.md  (generated, vendored)

@@ -1,22 +1,21 @@
|
|||
# Google Cloud for Go
|
||||
# Google Cloud Client Libraries for Go
|
||||
|
||||
[](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go)
|
||||
[](https://godoc.org/cloud.google.com/go)
|
||||
|
||||
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
|
||||
|
||||
``` go
|
||||
import "cloud.google.com/go"
|
||||
```
|
||||
|
||||
Go packages for Google Cloud Platform services.
|
||||
|
||||
To install the packages on your system,
|
||||
|
||||
```
|
||||
$ go get -u cloud.google.com/go/...
|
||||
```
|
||||
|
||||
**NOTE:** These packages are under development, and may occasionally make
|
||||
backwards-incompatible changes.
|
||||
**NOTE:** Some of these packages are under development, and may occasionally
|
||||
make backwards-incompatible changes.
|
||||
|
||||
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
|
||||
|
||||
|
@@ -34,154 +33,44 @@ backwards-incompatible changes.
|
|||
|
||||
## News
|
||||
|
||||
_March 17, 2017_
|
||||
|
||||
Breaking Pubsub changes.
|
||||
* Publish is now asynchronous
|
||||
([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
|
||||
* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
|
||||
* Message.Done replaced with Message.Ack and Message.Nack.
|
||||
|
||||
_February 14, 2017_
|
||||
|
||||
Release of a client library for Spanner. See
|
||||
the
|
||||
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
|
||||
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
|
||||
|
||||
Note that although the Spanner service is beta, the Go client library is alpha.
|
||||
|
||||
_December 12, 2016_
|
||||
|
||||
Beta release of BigQuery, DataStore, Logging and Storage. See the
|
||||
[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
|
||||
|
||||
Also, BigQuery now supports structs. Read a row directly into a struct with
|
||||
`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
|
||||
You can also use field tags. See the [package documentation][cloud-bigquery-ref]
|
||||
for details.
|
||||
|
||||
_December 5, 2016_
|
||||
|
||||
More changes to BigQuery:
|
||||
|
||||
* The `ValueList` type was removed. It is no longer necessary. Instead of
|
||||
```go
|
||||
var v ValueList
|
||||
... it.Next(&v) ..
|
||||
```
|
||||
use
|
||||
|
||||
```go
|
||||
var v []Value
|
||||
... it.Next(&v) ...
|
||||
```
|
||||
|
||||
* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
|
||||
`ValueList` would append to the slice. Now each call resets the size to zero first.
|
||||
|
||||
* Schema inference will infer the SQL type BYTES for a struct field of
|
||||
type []byte. Previously it inferred STRING.
|
||||
|
||||
* The types `uint`, `uint64` and `uintptr` are no longer supported in schema
|
||||
inference. BigQuery's integer type is INT64, and those types may hold values
|
||||
that are not correctly represented in a 64-bit signed integer.
|
||||
|
||||
* The SQL types DATE, TIME and DATETIME are now supported. They correspond to
|
||||
the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
|
||||
package.
|
||||
|
||||
_November 17, 2016_
|
||||
|
||||
Change to BigQuery: values from INTEGER columns will now be returned as int64,
|
||||
not int. This will avoid errors arising from large values on 32-bit systems.
|
||||
|
||||
_November 8, 2016_
|
||||
|
||||
New datastore feature: datastore now encodes your nested Go structs as Entity values,
|
||||
instead of a flattened list of the embedded struct's fields.
|
||||
This means that you may now have twice-nested slices, eg.
|
||||
```go
|
||||
type State struct {
|
||||
Cities []struct{
|
||||
Populations []int
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
|
||||
more details.
|
||||
|
||||
_November 8, 2016_
|
||||
|
||||
Breaking changes to datastore: contexts no longer hold namespaces; instead you
|
||||
must set a key's namespace explicitly. Also, key functions have been changed
|
||||
and renamed.
|
||||
|
||||
* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
|
||||
```go
|
||||
q := datastore.NewQuery("Kind").Namespace("ns")
|
||||
```
|
||||
|
||||
* All the fields of Key are exported. That means you can construct any Key with a struct literal:
|
||||
```go
|
||||
k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
|
||||
```
|
||||
|
||||
* As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed.
|
||||
|
||||
* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
|
||||
```go
|
||||
NewIncompleteKey(ctx, kind, parent)
|
||||
```
|
||||
with
|
||||
```go
|
||||
IncompleteKey(kind, parent)
|
||||
```
|
||||
and if you do use namespaces, make sure you set the namespace on the returned key.
|
||||
|
||||
* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
|
||||
```go
|
||||
NewKey(ctx, kind, name, 0, parent)
|
||||
NewKey(ctx, kind, "", id, parent)
|
||||
```
|
||||
with
|
||||
```go
|
||||
NameKey(kind, name, parent)
|
||||
IDKey(kind, id, parent)
|
||||
```
|
||||
and if you do use namespaces, make sure you set the namespace on the returned key.
|
||||
|
||||
* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
|
||||
|
||||
* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
|
||||
|
||||
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
|
||||
more details.
|
||||
|
||||
_October 27, 2016_
|
||||
|
||||
Breaking change to bigquery: `NewGCSReference` is now a function,
|
||||
not a method on `Client`.
|
||||
|
||||
New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
|
||||
loading data into a table from a file or any `io.Reader`.
|
||||
|
||||
_October 21, 2016_
|
||||
|
||||
Breaking change to pubsub: removed `pubsub.Done`.
|
||||
|
||||
Use `iterator.Done` instead, where `iterator` is the package
|
||||
`google.golang.org/api/iterator`.
|
||||
|
||||
|
||||
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
|
||||
|
||||
## Supported APIs
|
||||
|
||||
Google API | Status | Package
|
||||
-------------------------------|--------------|-----------------------------------------------------------
|
||||
[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
|
||||
[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref]
|
||||
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
|
||||
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
|
||||
[Logging][cloud-logging] | beta | [`cloud.google.com/go/logging`][cloud-logging-ref]
|
||||
[Pub/Sub][cloud-pubsub] | alpha | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
|
||||
[Vision][cloud-vision] | beta | [`cloud.google.com/go/vision`][cloud-vision-ref]
|
||||
[Language][cloud-language] | alpha | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
|
||||
[Speech][cloud-speech] | alpha | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref]
|
||||
[Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
|
||||
Google API | Status | Package
|
||||
---------------------------------|--------------|-----------------------------------------------------------
|
||||
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
|
||||
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
|
||||
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
|
||||
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
|
||||
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
|
||||
[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
|
||||
[Pub/Sub][cloud-pubsub] | alpha | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
|
||||
[Vision][cloud-vision] | beta | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
|
||||
[Language][cloud-language] | beta | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
|
||||
[Speech][cloud-speech] | beta | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
|
||||
[Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
|
||||
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
|
||||
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
|
||||
[Video Intelligence][cloud-video]| alpha | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
|
||||
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errors`][cloud-errors-ref]
|
||||
|
||||
|
||||
> **Alpha status**: the API is still being actively developed. As a
|
||||
|
@@ -215,6 +104,7 @@ By default, each API will use [Google Application Default Credentials][default-c
|
|||
for authorization credentials used in calling the API endpoints. This will allow your
|
||||
application to run in many environments without requiring explicit configuration.
|
||||
|
||||
[snip]:# (auth)
|
||||
```go
|
||||
client, err := storage.NewClient(ctx)
|
||||
```
|
||||
|
@@ -225,6 +115,7 @@ pass
|
|||
[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
|
||||
to the `NewClient` function of the desired package. For example:
|
||||
|
||||
[snip]:# (auth-JSON)
|
||||
```go
|
||||
client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
|
||||
```
|
||||
|
@@ -234,6 +125,7 @@ You can exert more control over authorization by using the
|
|||
create an `oauth2.TokenSource`. Then pass
|
||||
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
|
||||
to the `NewClient` function:
|
||||
[snip]:# (auth-ts)
|
||||
```go
|
||||
tokenSource := ...
|
||||
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
|
||||
|
@@ -251,6 +143,7 @@ client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
|
|||
|
||||
First create a `datastore.Client` to use throughout your application:
|
||||
|
||||
[snip]:# (datastore-1)
|
||||
```go
|
||||
client, err := datastore.NewClient(ctx, "my-project-id")
|
||||
if err != nil {
|
||||
|
@@ -260,6 +153,7 @@ if err != nil {
|
|||
|
||||
Then use that client to interact with the API:
|
||||
|
||||
[snip]:# (datastore-2)
|
||||
```go
|
||||
type Post struct {
|
||||
Title string
|
||||
|
@@ -267,8 +161,8 @@ type Post struct {
|
|||
PublishedAt time.Time
|
||||
}
|
||||
keys := []*datastore.Key{
|
||||
datastore.NewKey(ctx, "Post", "post1", 0, nil),
|
||||
datastore.NewKey(ctx, "Post", "post2", 0, nil),
|
||||
datastore.NameKey("Post", "post1", nil),
|
||||
datastore.NameKey("Post", "post2", nil),
|
||||
}
|
||||
posts := []*Post{
|
||||
{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
|
||||
|
@@ -290,6 +184,7 @@ if _, err := client.PutMulti(ctx, keys, posts); err != nil {
|
|||
|
||||
First create a `storage.Client` to use throughout your application:
|
||||
|
||||
[snip]:# (storage-1)
|
||||
```go
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
|
@@ -297,6 +192,7 @@ if err != nil {
|
|||
}
|
||||
```
|
||||
|
||||
[snip]:# (storage-2)
|
||||
```go
|
||||
// Read the object1 from bucket.
|
||||
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
|
||||
|
@@ -321,6 +217,7 @@ if err != nil {
|
|||
|
||||
First create a `pubsub.Client` to use throughout your application:
|
||||
|
||||
[snip]:# (pubsub-1)
|
||||
```go
|
||||
client, err := pubsub.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
|
@@ -330,36 +227,30 @@ if err != nil {
|
|||
|
||||
Then use the client to publish and subscribe:
|
||||
|
||||
[snip]:# (pubsub-2)
|
||||
```go
|
||||
// Publish "hello world" on topic1.
|
||||
topic := client.Topic("topic1")
|
||||
msgIDs, err := topic.Publish(ctx, &pubsub.Message{
|
||||
res := topic.Publish(ctx, &pubsub.Message{
|
||||
Data: []byte("hello world"),
|
||||
})
|
||||
// The publish happens asynchronously.
|
||||
// Later, you can get the result from res:
|
||||
...
|
||||
msgID, err := res.Get(ctx)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Create an iterator to pull messages via subscription1.
|
||||
it, err := client.Subscription("subscription1").Pull(ctx)
|
||||
// Use a callback to receive messages via subscription1.
|
||||
sub := client.Subscription("subscription1")
|
||||
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
|
||||
fmt.Println(m.Data)
|
||||
m.Ack() // Acknowledge that we've consumed the message.
|
||||
})
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
defer it.Stop()
|
||||
|
||||
// Consume N messages from the iterator.
|
||||
for i := 0; i < N; i++ {
|
||||
msg, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to retrieve message: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Message %d: %s\n", i, msg.Data)
|
||||
msg.Done(true) // Acknowledge that we've consumed the message.
|
||||
}
|
||||
```
|
||||
|
||||
## Cloud BigQuery [](https://godoc.org/cloud.google.com/go/bigquery)
|
||||
|
@@ -372,13 +263,16 @@ for i := 0; i < N; i++ {
|
|||
### Example Usage
|
||||
|
||||
First create a `bigquery.Client` to use throughout your application:
|
||||
[snip]:# (bq-1)
|
||||
```go
|
||||
c, err := bigquery.NewClient(ctx, "my-project-ID")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
// TODO: Handle error.
|
||||
}
|
||||
```
|
||||
|
||||
Then use that client to interact with the API:
|
||||
[snip]:# (bq-2)
|
||||
```go
|
||||
// Construct a query.
|
||||
q := c.Query(`
|
||||
|
@@ -391,19 +285,19 @@ q := c.Query(`
|
|||
// Execute the query.
|
||||
it, err := q.Read(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Iterate through the results.
|
||||
for {
|
||||
var values []bigquery.Value
|
||||
err := it.Next(&values)
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(values)
|
||||
var values []bigquery.Value
|
||||
err := it.Next(&values)
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(values)
|
||||
}
|
||||
```
|
||||
|
||||
|
@@ -418,29 +312,32 @@ for {
|
|||
### Example Usage
|
||||
|
||||
First create a `logging.Client` to use throughout your application:
|
||||
|
||||
[snip]:# (logging-1)
|
||||
```go
|
||||
ctx := context.Background()
|
||||
client, err := logging.NewClient(ctx, "my-project")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
// TODO: Handle error.
|
||||
}
|
||||
```
|
||||
|
||||
Usually, you'll want to add log entries to a buffer to be periodically flushed
|
||||
(automatically and asynchronously) to the Stackdriver Logging service.
|
||||
[snip]:# (logging-2)
|
||||
```go
|
||||
logger := client.Logger("my-log")
|
||||
logger.Log(logging.Entry{Payload: "something happened!"})
|
||||
```
|
||||
|
||||
Close your client before your program exits, to flush any buffered log entries.
|
||||
[snip]:# (logging-3)
|
||||
```go
|
||||
err = client.Close()
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
// TODO: Handle error.
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Cloud Spanner [](https://godoc.org/cloud.google.com/go/spanner)
|
||||
|
||||
- [About Cloud Spanner][cloud-spanner]
|
||||
|
@@ -451,26 +348,28 @@ if err != nil {
|
|||
|
||||
First create a `spanner.Client` to use throughout your application:
|
||||
|
||||
[snip]:# (spanner-1)
|
||||
```go
|
||||
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
[snip]:# (spanner-2)
|
||||
```go
|
||||
// Simple Reads And Writes
|
||||
_, err := client.Apply(ctx, []*spanner.Mutation{
|
||||
spanner.Insert("Users",
|
||||
[]string{"name", "email"},
|
||||
[]interface{}{"alice", "a@example.com"})})
|
||||
_, err = client.Apply(ctx, []*spanner.Mutation{
|
||||
spanner.Insert("Users",
|
||||
[]string{"name", "email"},
|
||||
[]interface{}{"alice", "a@example.com"})})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
row, err := client.Single().ReadRow(ctx, "Users",
|
||||
spanner.Key{"alice"}, []string{"email"})
|
||||
spanner.Key{"alice"}, []string{"email"})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
|
@@ -512,17 +411,32 @@ for more information.
|
|||
[cloud-logging-docs]: https://cloud.google.com/logging/docs
|
||||
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
|
||||
|
||||
[cloud-vision]: https://cloud.google.com/vision/
|
||||
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision
|
||||
[cloud-monitoring]: https://cloud.google.com/monitoring/
|
||||
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3
|
||||
|
||||
[cloud-vision]: https://cloud.google.com/vision
|
||||
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1
|
||||
|
||||
[cloud-language]: https://cloud.google.com/natural-language
|
||||
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
|
||||
|
||||
[cloud-speech]: https://cloud.google.com/speech
|
||||
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1
|
||||
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1
|
||||
|
||||
[cloud-spanner]: https://cloud.google.com/spanner/
|
||||
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
|
||||
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs
|
||||
|
||||
[cloud-translation]: https://cloud.google.com/translation
|
||||
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation
|
||||
|
||||
[cloud-trace]: https://cloud.google.com/trace/
|
||||
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace
|
||||
|
||||
[cloud-video]: https://cloud.google.com/video-intelligence/
|
||||
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
|
||||
|
||||
[cloud-errors]: https://cloud.google.com/error-reporting/
|
||||
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errors
|
||||
|
||||
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
|
||||
|
|
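
The BigQuery hunks in the README above split the updated query example across several fragments. Read together, the pattern after the change looks roughly like the following self-contained sketch; the project ID and query text are placeholders, not values taken from the diff.

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	// Placeholder project ID, as in the README.
	c, err := bigquery.NewClient(ctx, "my-project-ID")
	if err != nil {
		log.Fatal(err) // TODO: Handle error.
	}

	// Construct and execute a query (the query text is illustrative only).
	q := c.Query("SELECT 17 AS answer")
	it, err := q.Read(ctx)
	if err != nil {
		log.Fatal(err) // TODO: Handle error.
	}

	// Iterate through the results.
	for {
		var values []bigquery.Value
		err := it.Next(&values)
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err) // TODO: Handle error.
		}
		fmt.Println(values)
	}
}
```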
9  vendor/cloud.google.com/go/bigquery/bigquery.go  (generated, vendored)

@@ -74,3 +74,12 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
|
|||
func (c *Client) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) insertJob(ctx context.Context, conf *insertJobConf) (*Job, error) {
|
||||
job, err := c.service.insertJob(ctx, c.projectID, conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
job.c = c
|
||||
return job, nil
|
||||
}
|
||||
|
|
2  vendor/cloud.google.com/go/bigquery/copy.go  (generated, vendored)

@@ -70,5 +70,5 @@ func (c *Copier) Run(ctx context.Context) (*Job, error) {
|
|||
}
|
||||
job := &bq.Job{Configuration: &bq.JobConfiguration{Copy: conf}}
|
||||
setJobRef(job, c.JobID, c.c.projectID)
|
||||
return c.c.service.insertJob(ctx, c.c.projectID, &insertJobConf{job: job})
|
||||
return c.c.insertJob(ctx, &insertJobConf{job: job})
|
||||
}
|
||||
|
|
2  vendor/cloud.google.com/go/bigquery/extract.go  (generated, vendored)

@@ -72,5 +72,5 @@ func (e *Extractor) Run(ctx context.Context) (*Job, error) {
|
|||
conf.PrintHeader = &f
|
||||
}
|
||||
|
||||
return e.c.service.insertJob(ctx, e.c.projectID, &insertJobConf{job: job})
|
||||
return e.c.insertJob(ctx, &insertJobConf{job: job})
|
||||
}
|
||||
|
|
414  vendor/cloud.google.com/go/bigquery/integration_test.go  (generated, vendored)

@@ -15,6 +15,7 @@
|
|||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
|
@@ -26,7 +27,10 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
gax "github.com/googleapis/gax-go"
|
||||
|
||||
"cloud.google.com/go/civil"
|
||||
"cloud.google.com/go/internal"
|
||||
"cloud.google.com/go/internal/pretty"
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
"golang.org/x/net/context"
|
||||
|
@@ -42,7 +46,7 @@ var (
|
|||
{Name: "name", Type: StringFieldType},
|
||||
{Name: "num", Type: IntegerFieldType},
|
||||
}
|
||||
fiveMinutesFromNow time.Time
|
||||
testTableExpiration time.Time
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@@ -79,6 +83,7 @@ func initIntegrationTest() {
|
|||
if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
|
||||
log.Fatalf("creating dataset: %v", err)
|
||||
}
|
||||
testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
|
||||
}
|
||||
|
||||
func TestIntegration_Create(t *testing.T) {
|
||||
|
@@ -136,7 +141,7 @@ func TestIntegration_TableMetadata(t *testing.T) {
|
|||
if got, want := md.Type, RegularTable; got != want {
|
||||
t.Errorf("metadata.Type: got %v, want %v", got, want)
|
||||
}
|
||||
if got, want := md.ExpirationTime, fiveMinutesFromNow; !got.Equal(want) {
|
||||
if got, want := md.ExpirationTime, testTableExpiration; !got.Equal(want) {
|
||||
t.Errorf("metadata.Type: got %v, want %v", got, want)
|
||||
}
|
||||
|
||||
|
@@ -221,32 +226,37 @@ func TestIntegration_Tables(t *testing.T) {
|
|||
ctx := context.Background()
|
||||
table := newTable(t, schema)
|
||||
defer table.Delete(ctx)
|
||||
wantName := table.FullyQualifiedName()
|
||||
|
||||
// Iterate over tables in the dataset.
|
||||
it := dataset.Tables(ctx)
|
||||
var tables []*Table
|
||||
for {
|
||||
tbl, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
// This test is flaky due to eventual consistency.
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
|
||||
// Iterate over tables in the dataset.
|
||||
it := dataset.Tables(ctx)
|
||||
var tableNames []string
|
||||
for {
|
||||
tbl, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
tableNames = append(tableNames, tbl.FullyQualifiedName())
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
// Other tests may be running with this dataset, so there might be more
|
||||
// than just our table in the list. So don't try for an exact match; just
|
||||
// make sure that our table is there somewhere.
|
||||
for _, tn := range tableNames {
|
||||
if tn == wantName {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
tables = append(tables, tbl)
|
||||
}
|
||||
// Other tests may be running with this dataset, so there might be more
|
||||
// than just our table in the list. So don't try for an exact match; just
|
||||
// make sure that our table is there somewhere.
|
||||
found := false
|
||||
for _, tbl := range tables {
|
||||
if reflect.DeepEqual(tbl, table) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("Tables: got %v\nshould see %v in the list", pretty.Value(tables), pretty.Value(table))
|
||||
return false, fmt.Errorf("got %v\nwant %s in the list", tableNames, wantName)
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -302,6 +312,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
job2, err := client.JobFromID(ctx, job1.ID())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@@ -312,6 +323,18 @@ func TestIntegration_UploadAndRead(t *testing.T) {
|
|||
}
|
||||
checkRead(t, "job.Read", rit, wantRows)
|
||||
|
||||
// Get statistics.
|
||||
jobStatus, err := job2.Status(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if jobStatus.Statistics == nil {
|
||||
t.Fatal("jobStatus missing statistics")
|
||||
}
|
||||
if _, ok := jobStatus.Statistics.Details.(*QueryStatistics); !ok {
|
||||
t.Errorf("expected QueryStatistics, got %T", jobStatus.Statistics.Details)
|
||||
}
|
||||
|
||||
// Test reading directly into a []Value.
|
||||
valueLists, err := readAll(table.Read(ctx))
|
||||
if err != nil {
|
||||
|
@@ -348,20 +371,39 @@ func TestIntegration_UploadAndRead(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
type SubSubTestStruct struct {
|
||||
Integer int64
|
||||
}
|
||||
|
||||
type SubTestStruct struct {
|
||||
String string
|
||||
Record SubSubTestStruct
|
||||
RecordArray []SubSubTestStruct
|
||||
}
|
||||
|
||||
type TestStruct struct {
|
||||
Name string
|
||||
Nums []int
|
||||
Sub Sub
|
||||
Subs []*Sub
|
||||
}
|
||||
Name string
|
||||
Bytes []byte
|
||||
Integer int64
|
||||
Float float64
|
||||
Boolean bool
|
||||
Timestamp time.Time
|
||||
Date civil.Date
|
||||
Time civil.Time
|
||||
DateTime civil.DateTime
|
||||
|
||||
type Sub struct {
|
||||
B bool
|
||||
SubSub SubSub
|
||||
SubSubs []*SubSub
|
||||
}
|
||||
StringArray []string
|
||||
IntegerArray []int64
|
||||
FloatArray []float64
|
||||
BooleanArray []bool
|
||||
TimestampArray []time.Time
|
||||
DateArray []civil.Date
|
||||
TimeArray []civil.Time
|
||||
DateTimeArray []civil.DateTime
|
||||
|
||||
type SubSub struct{ Count int }
|
||||
Record SubTestStruct
|
||||
RecordArray []SubTestStruct
|
||||
}
|
||||
|
||||
func TestIntegration_UploadAndReadStructs(t *testing.T) {
|
||||
if client == nil {
|
||||
|
@@ -376,16 +418,61 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
|
|||
table := newTable(t, schema)
|
||||
defer table.Delete(ctx)
|
||||
|
||||
d := civil.Date{2016, 3, 20}
|
||||
tm := civil.Time{15, 4, 5, 0}
|
||||
ts := time.Date(2016, 3, 20, 15, 4, 5, 0, time.UTC)
|
||||
dtm := civil.DateTime{d, tm}
|
||||
|
||||
d2 := civil.Date{1994, 5, 15}
|
||||
tm2 := civil.Time{1, 2, 4, 0}
|
||||
ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)
|
||||
dtm2 := civil.DateTime{d2, tm2}
|
||||
|
||||
// Populate the table.
|
||||
upl := table.Uploader()
|
||||
want := []*TestStruct{
|
||||
{Name: "a", Nums: []int{1, 2}, Sub: Sub{B: true}, Subs: []*Sub{{B: false}, {B: true}}},
|
||||
{Name: "b", Nums: []int{1}, Subs: []*Sub{{B: false}, {B: false}, {B: true}}},
|
||||
{Name: "c", Sub: Sub{B: true}},
|
||||
{
|
||||
Name: "d",
|
||||
Sub: Sub{SubSub: SubSub{12}, SubSubs: []*SubSub{{1}, {2}, {3}}},
|
||||
Subs: []*Sub{{B: false, SubSub: SubSub{4}}, {B: true, SubSubs: []*SubSub{{5}, {6}}}},
|
||||
"a",
|
||||
[]byte("byte"),
|
||||
42,
|
||||
3.14,
|
||||
true,
|
||||
ts,
|
||||
d,
|
||||
tm,
|
||||
dtm,
|
||||
[]string{"a", "b"},
|
||||
[]int64{1, 2},
|
||||
[]float64{1, 1.41},
|
||||
[]bool{true, false},
|
||||
[]time.Time{ts, ts2},
|
||||
[]civil.Date{d, d2},
|
||||
[]civil.Time{tm, tm2},
|
||||
[]civil.DateTime{dtm, dtm2},
|
||||
SubTestStruct{
|
||||
"string",
|
||||
SubSubTestStruct{24},
|
||||
[]SubSubTestStruct{{1}, {2}},
|
||||
},
|
||||
[]SubTestStruct{
|
||||
{String: "empty"},
|
||||
{
|
||||
"full",
|
||||
SubSubTestStruct{1},
|
||||
[]SubSubTestStruct{{1}, {2}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "b",
|
||||
Bytes: []byte("byte2"),
|
||||
Integer: 24,
|
||||
Float: 4.13,
|
||||
Boolean: false,
|
||||
Timestamp: ts,
|
||||
Date: d,
|
||||
Time: tm,
|
||||
DateTime: dtm,
|
||||
},
|
||||
}
|
||||
var savers []*StructSaver
|
||||
|
@@ -573,28 +660,38 @@ func TestIntegration_DML(t *testing.T) {
|
|||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
table := newTable(t, schema)
|
||||
defer table.Delete(ctx)
|
||||
// Retry insert; sometimes it fails with INTERNAL.
|
||||
err := internal.Retry(ctx, gax.Backoff{}, func() (bool, error) {
|
||||
table := newTable(t, schema)
|
||||
defer table.Delete(ctx)
|
||||
|
||||
// Use DML to insert.
|
||||
wantRows := [][]Value{
|
||||
[]Value{"a", int64(0)},
|
||||
[]Value{"b", int64(1)},
|
||||
[]Value{"c", int64(2)},
|
||||
}
|
||||
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+
|
||||
"VALUES ('a', 0), ('b', 1), ('c', 2)",
|
||||
table.TableID)
|
||||
q := client.Query(query)
|
||||
q.UseStandardSQL = true // necessary for DML
|
||||
job, err := q.Run(ctx)
|
||||
// Use DML to insert.
|
||||
wantRows := [][]Value{
|
||||
[]Value{"a", int64(0)},
|
||||
[]Value{"b", int64(1)},
|
||||
[]Value{"c", int64(2)},
|
||||
}
|
||||
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+
|
||||
"VALUES ('a', 0), ('b', 1), ('c', 2)",
|
||||
table.TableID)
|
||||
q := client.Query(query)
|
||||
q.UseStandardSQL = true // necessary for DML
|
||||
job, err := q.Run(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := wait(ctx, job); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if msg, ok := compareRead(table.Read(ctx), wantRows); !ok {
|
||||
// Stop on read error, because that has never been flaky.
|
||||
return true, errors.New(msg)
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := wait(ctx, job); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkRead(t, "INSERT", table.Read(ctx), wantRows)
|
||||
}
|
||||
|
||||
func TestIntegration_TimeTypes(t *testing.T) {
|
||||
|
@@ -645,12 +742,183 @@ func TestIntegration_TimeTypes(t *testing.T) {
|
|||
checkRead(t, "TimeTypes", table.Read(ctx), wantRows)
|
||||
}
|
||||
|
||||
func TestIntegration_StandardQuery(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
d := civil.Date{2016, 3, 20}
|
||||
tm := civil.Time{15, 04, 05, 0}
|
||||
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
||||
dtm := ts.Format("2006-01-02 15:04:05")
|
||||
|
||||
// Constructs Value slices made up of int64s.
|
||||
ints := func(args ...int) []Value {
|
||||
vals := make([]Value, len(args))
|
||||
for i, arg := range args {
|
||||
vals[i] = int64(arg)
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
query string
|
||||
wantRow []Value
|
||||
}{
|
||||
{"SELECT 1", ints(1)},
|
||||
{"SELECT 1.3", []Value{1.3}},
|
||||
{"SELECT TRUE", []Value{true}},
|
||||
{"SELECT 'ABC'", []Value{"ABC"}},
|
||||
{"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}},
|
||||
{fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}},
|
||||
{fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}},
|
||||
{fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}},
|
||||
{fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{d, tm}}},
|
||||
{fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}},
|
||||
{fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}},
|
||||
{"SELECT (1, 2)", []Value{ints(1, 2)}},
|
||||
{"SELECT [1, 2, 3]", []Value{ints(1, 2, 3)}},
|
||||
{"SELECT ([1, 2], 3, [4, 5])", []Value{[]Value{ints(1, 2), int64(3), ints(4, 5)}}},
|
||||
{"SELECT [(1, 2, 3), (4, 5, 6)]", []Value{[]Value{ints(1, 2, 3), ints(4, 5, 6)}}},
|
||||
{"SELECT [([1, 2, 3], 4), ([5, 6], 7)]", []Value{[]Value{[]Value{ints(1, 2, 3), int64(4)}, []Value{ints(5, 6), int64(7)}}}},
|
||||
{"SELECT ARRAY(SELECT STRUCT([1, 2]))", []Value{[]Value{[]Value{ints(1, 2)}}}},
|
||||
}
|
||||
for _, c := range testCases {
|
||||
q := client.Query(c.query)
|
||||
q.UseStandardSQL = true
|
||||
it, err := q.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkRead(t, "StandardQuery", it, [][]Value{c.wantRow})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_LegacyQuery(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
||||
dtm := ts.Format("2006-01-02 15:04:05")
|
||||
|
||||
testCases := []struct {
|
||||
query string
|
||||
wantRow []Value
|
||||
}{
|
||||
{"SELECT 1", []Value{int64(1)}},
|
||||
{"SELECT 1.3", []Value{1.3}},
|
||||
{"SELECT TRUE", []Value{true}},
|
||||
{"SELECT 'ABC'", []Value{"ABC"}},
|
||||
{"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}},
|
||||
{fmt.Sprintf("SELECT TIMESTAMP('%s')", dtm), []Value{ts}},
|
||||
{fmt.Sprintf("SELECT DATE(TIMESTAMP('%s'))", dtm), []Value{"2016-03-20"}},
|
||||
{fmt.Sprintf("SELECT TIME(TIMESTAMP('%s'))", dtm), []Value{"15:04:05"}},
|
||||
}
|
||||
for _, c := range testCases {
|
||||
q := client.Query(c.query)
|
||||
it, err := q.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkRead(t, "LegacyQuery", it, [][]Value{c.wantRow})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_QueryParameters(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
d := civil.Date{2016, 3, 20}
|
||||
tm := civil.Time{15, 04, 05, 0}
|
||||
dtm := civil.DateTime{d, tm}
|
||||
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
||||
|
||||
type ss struct {
|
||||
String string
|
||||
}
|
||||
|
||||
type s struct {
|
||||
Timestamp time.Time
|
||||
StringArray []string
|
||||
SubStruct ss
|
||||
SubStructArray []ss
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
query string
|
||||
parameters []QueryParameter
|
||||
wantRow []Value
|
||||
}{
|
||||
{"SELECT @val", []QueryParameter{{"val", 1}}, []Value{int64(1)}},
|
||||
{"SELECT @val", []QueryParameter{{"val", 1.3}}, []Value{1.3}},
|
||||
{"SELECT @val", []QueryParameter{{"val", true}}, []Value{true}},
|
||||
{"SELECT @val", []QueryParameter{{"val", "ABC"}}, []Value{"ABC"}},
|
||||
{"SELECT @val", []QueryParameter{{"val", []byte("foo")}}, []Value{[]byte("foo")}},
|
||||
{"SELECT @val", []QueryParameter{{"val", ts}}, []Value{ts}},
|
||||
{"SELECT @val", []QueryParameter{{"val", []time.Time{ts, ts}}}, []Value{[]Value{ts, ts}}},
|
||||
{"SELECT @val", []QueryParameter{{"val", dtm}}, []Value{dtm}},
|
||||
{"SELECT @val", []QueryParameter{{"val", d}}, []Value{d}},
|
||||
{"SELECT @val", []QueryParameter{{"val", tm}}, []Value{tm}},
|
||||
{"SELECT @val", []QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}},
|
||||
[]Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}}},
|
||||
{"SELECT @val.Timestamp, @val.SubStruct.String", []QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}}, []Value{ts, "a"}},
|
||||
}
|
||||
for _, c := range testCases {
|
||||
q := client.Query(c.query)
|
||||
q.Parameters = c.parameters
|
||||
it, err := q.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkRead(t, "QueryParameters", it, [][]Value{c.wantRow})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_ReadNullIntoStruct(t *testing.T) {
|
||||
// Reading a null into a struct field should return an error (not panic).
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
table := newTable(t, schema)
|
||||
defer table.Delete(ctx)
|
||||
|
||||
upl := table.Uploader()
|
||||
row := &ValuesSaver{
|
||||
Schema: schema,
|
||||
Row: []Value{"name", nil},
|
||||
}
|
||||
if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil {
|
||||
t.Fatal(putError(err))
|
||||
}
|
||||
if err := waitForRow(ctx, table); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
|
||||
q.DefaultProjectID = dataset.ProjectID
|
||||
q.DefaultDatasetID = dataset.DatasetID
|
||||
it, err := q.Read(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
type S struct{ Num int64 }
|
||||
var s S
|
||||
if err := it.Next(&s); err == nil {
|
||||
t.Fatal("got nil, want error")
|
||||
}
|
||||
}
|
||||
|
||||
// Creates a new, temporary table with a unique name and the given schema.
|
||||
func newTable(t *testing.T, s Schema) *Table {
|
||||
fiveMinutesFromNow = time.Now().Add(5 * time.Minute).Round(time.Second)
|
||||
name := fmt.Sprintf("t%d", time.Now().UnixNano())
|
||||
table := dataset.Table(name)
|
||||
err := table.Create(context.Background(), s, TableExpiration(fiveMinutesFromNow))
|
||||
err := table.Create(context.Background(), s, TableExpiration(testTableExpiration))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@@ -658,21 +926,28 @@ func newTable(t *testing.T, s Schema) *Table {
|
|||
}
|
||||
|
||||
func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) {
|
||||
if msg2, ok := compareRead(it, want); !ok {
|
||||
t.Errorf("%s: %s", msg, msg2)
|
||||
}
|
||||
}
|
||||
|
||||
func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) {
|
||||
got, err := readAll(it)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", msg, err)
|
||||
return err.Error(), false
|
||||
}
|
||||
if len(got) != len(want) {
|
||||
t.Errorf("%s: got %d rows, want %d", msg, len(got), len(want))
|
||||
return fmt.Sprintf("got %d rows, want %d", len(got), len(want)), false
|
||||
}
|
||||
sort.Sort(byCol0(got))
|
||||
for i, r := range got {
|
||||
gotRow := []Value(r)
|
||||
wantRow := want[i]
|
||||
if !reflect.DeepEqual(gotRow, wantRow) {
|
||||
t.Errorf("%s #%d: got %v, want %v", msg, i, gotRow, wantRow)
|
||||
return fmt.Sprintf("#%d: got %v, want %v", i, gotRow, wantRow), false
|
||||
}
|
||||
}
|
||||
return "", true
|
||||
}
|
||||
|
||||
func readAll(it *RowIterator) ([][]Value, error) {
|
||||
|
@@ -721,6 +996,15 @@ func wait(ctx context.Context, job *Job) error {
|
|||
if status.Err() != nil {
|
||||
return fmt.Errorf("job status error: %#v", status.Err())
|
||||
}
|
||||
if status.Statistics == nil {
|
||||
return errors.New("nil Statistics")
|
||||
}
|
||||
if status.Statistics.EndTime.IsZero() {
|
||||
return errors.New("EndTime is zero")
|
||||
}
|
||||
if status.Statistics.Details == nil {
|
||||
return errors.New("nil Statistics.Details")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
12  vendor/cloud.google.com/go/bigquery/iterator.go  (generated, vendored)

@@ -92,6 +92,9 @@ type RowIterator struct {
|
|||
// A repeated field corresponds to a slice or array of the element type.
|
||||
// A RECORD type (nested schema) corresponds to a nested struct or struct pointer.
|
||||
// All calls to Next on the same iterator must use the same struct type.
|
||||
//
|
||||
// It is an error to attempt to read a BigQuery NULL value into a struct field.
|
||||
// If your table contains NULLs, use a *[]Value or *map[string]Value.
|
||||
func (it *RowIterator) Next(dst interface{}) error {
|
||||
var vl ValueLoader
|
||||
switch dst := dst.(type) {
|
||||
|
@@ -141,14 +144,7 @@ func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
|
|||
pc.startIndex = it.StartIndex
|
||||
}
|
||||
it.pf.setPaging(pc)
|
||||
var res *readDataResult
|
||||
var err error
|
||||
for {
|
||||
res, err = it.pf.fetch(it.ctx, it.service, pageToken)
|
||||
if err != errIncompleteJob {
|
||||
break
|
||||
}
|
||||
}
|
||||
res, err := it.pf.fetch(it.ctx, it.service, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
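
The iterator.go hunk above adds a doc comment saying that reading a BigQuery NULL into a struct field is an error, and that a *[]Value or *map[string]Value should be used instead. A minimal sketch of that suggestion follows; the iterator is assumed to come from an earlier Query.Read or Table.Read call.

```go
package main

import (
	"fmt"

	"cloud.google.com/go/bigquery"
	"google.golang.org/api/iterator"
)

// printRows reads each row into a []Value rather than a struct, as the new
// RowIterator.Next doc comment recommends for tables that may contain NULLs.
func printRows(it *bigquery.RowIterator) error {
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(row) // NULL columns arrive as nil values in the slice.
	}
}
```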
50  vendor/cloud.google.com/go/bigquery/iterator_test.go  (generated, vendored)

@@ -284,56 +284,6 @@ func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) {
|
|||
}
|
||||
}
|
||||
|
||||
type delayedPageFetcher struct {
|
||||
pageFetcherStub
|
||||
delayCount int
|
||||
}
|
||||
|
||||
func (pf *delayedPageFetcher) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
|
||||
if pf.delayCount > 0 {
|
||||
pf.delayCount--
|
||||
return nil, errIncompleteJob
|
||||
}
|
||||
return pf.pageFetcherStub.fetch(ctx, s, token)
|
||||
}
|
||||
|
||||
func TestIterateIncompleteJob(t *testing.T) {
|
||||
want := [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}}
|
||||
pf := pageFetcherStub{
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "a",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
},
|
||||
},
|
||||
"a": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{{101, 102}, {111, 112}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
dpf := &delayedPageFetcher{
|
||||
pageFetcherStub: pf,
|
||||
delayCount: 1,
|
||||
}
|
||||
it := newRowIterator(context.Background(), nil, dpf)
|
||||
|
||||
values, _, err := consumeRowIterator(it)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) {
|
||||
t.Errorf("values: got:\n%v\nwant:\n%v", values, want)
|
||||
}
|
||||
if dpf.delayCount != 0 {
|
||||
t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNextDuringErrorState(t *testing.T) {
|
||||
pf := &pageFetcherStub{
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
|
|
219  vendor/cloud.google.com/go/bigquery/job.go  (generated, vendored)

@@ -15,6 +15,9 @@
|
|||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
|
@@ -23,28 +26,24 @@ import (
|
|||
|
||||
// A Job represents an operation which has been submitted to BigQuery for processing.
|
||||
type Job struct {
|
||||
service service
|
||||
c *Client
|
||||
projectID string
|
||||
jobID string
|
||||
|
||||
isQuery bool
|
||||
isQuery bool
|
||||
destinationTable *bq.TableReference // table to read query results from
|
||||
}
|
||||
|
||||
// JobFromID creates a Job which refers to an existing BigQuery job. The job
|
||||
// need not have been created by this package. For example, the job may have
|
||||
// been created in the BigQuery console.
|
||||
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
|
||||
jobType, err := c.service.getJobType(ctx, c.projectID, id)
|
||||
job, err := c.service.getJob(ctx, c.projectID, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Job{
|
||||
service: c.service,
|
||||
projectID: c.projectID,
|
||||
jobID: id,
|
||||
isQuery: jobType == queryJobType,
|
||||
}, nil
|
||||
job.c = c
|
||||
return job, nil
|
||||
}
|
||||
|
||||
func (j *Job) ID() string {
|
||||
|
@@ -69,6 +68,9 @@ type JobStatus struct {
|
|||
// All errors encountered during the running of the job.
|
||||
// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
|
||||
Errors []*Error
|
||||
|
||||
// Statistics about the job.
|
||||
Statistics *JobStatistics
|
||||
}
|
||||
|
||||
// setJobRef initializes job's JobReference if given a non-empty jobID.
|
||||
|
@@ -99,22 +101,47 @@ func (s *JobStatus) Err() error {
|
|||
|
||||
// Status returns the current status of the job. It fails if the Status could not be determined.
|
||||
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
|
||||
return j.service.jobStatus(ctx, j.projectID, j.jobID)
|
||||
js, err := j.c.service.jobStatus(ctx, j.projectID, j.jobID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Fill in the client field of Tables in the statistics.
|
||||
if js.Statistics != nil {
|
||||
if qs, ok := js.Statistics.Details.(*QueryStatistics); ok {
|
||||
for _, t := range qs.ReferencedTables {
|
||||
t.c = j.c
|
||||
}
|
||||
}
|
||||
}
|
||||
return js, nil
|
||||
}
|
||||
|
||||
// Cancel requests that a job be cancelled. This method returns without waiting for
|
||||
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
|
||||
// Cancelled jobs may still incur costs.
|
||||
func (j *Job) Cancel(ctx context.Context) error {
|
||||
return j.service.jobCancel(ctx, j.projectID, j.jobID)
|
||||
return j.c.service.jobCancel(ctx, j.projectID, j.jobID)
|
||||
}
|
||||
|
||||
// Wait blocks until the job or th context is done. It returns the final status
|
||||
// Wait blocks until the job or the context is done. It returns the final status
|
||||
// of the job.
|
||||
// If an error occurs while retrieving the status, Wait returns that error. But
|
||||
// Wait returns nil if the status was retrieved successfully, even if
|
||||
// status.Err() != nil. So callers must check both errors. See the example.
|
||||
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
|
||||
if j.isQuery {
|
||||
// We can avoid polling for query jobs.
|
||||
if _, err := j.c.service.waitForQuery(ctx, j.projectID, j.jobID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Note: extra RPC even if you just want to wait for the query to finish.
|
||||
js, err := j.Status(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return js, nil
|
||||
}
|
||||
// Non-query jobs must poll.
|
||||
var js *JobStatus
|
||||
err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
|
||||
js, err = j.Status(ctx)
|
||||
|
@ -131,3 +158,169 @@ func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
|
|||
}
|
||||
return js, nil
|
||||
}
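The Wait doc comment above asks callers to check both errors; a small sketch of that contract (the package name and helper function are illustrative only).

package example

import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

// waitAndCheck shows the two distinct error checks Wait requires: the error
// from Wait itself (status retrieval failed) and the job's own status error.
func waitAndCheck(ctx context.Context, job *bigquery.Job) error {
	status, err := job.Wait(ctx)
	if err != nil {
		return err // could not retrieve the job status
	}
	if err := status.Err(); err != nil {
		return err // the job completed, but unsuccessfully
	}
	return nil
}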
|
||||
|
||||
// Read fetches the results of a query job.
|
||||
// If j is not a query job, Read returns an error.
|
||||
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
|
||||
if !j.isQuery {
|
||||
return nil, errors.New("bigquery: cannot read from a non-query job")
|
||||
}
|
||||
var projectID string
|
||||
if j.destinationTable != nil {
|
||||
projectID = j.destinationTable.ProjectId
|
||||
} else {
|
||||
projectID = j.c.projectID
|
||||
}
|
||||
|
||||
schema, err := j.c.service.waitForQuery(ctx, projectID, j.jobID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// The destination table should only be nil if there was a query error.
|
||||
if j.destinationTable == nil {
|
||||
return nil, errors.New("bigquery: query job missing destination table")
|
||||
}
|
||||
return newRowIterator(ctx, j.c.service, &readTableConf{
|
||||
projectID: j.destinationTable.ProjectId,
|
||||
datasetID: j.destinationTable.DatasetId,
|
||||
tableID: j.destinationTable.TableId,
|
||||
schema: schema,
|
||||
}), nil
|
||||
}
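A sketch of draining the iterator returned by Job.Read; it assumes the RowIterator signals end-of-data with iterator.Done, as the tests in this package do, and the package name is illustrative.

package example

import (
	"fmt"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

// printJobRows reads every result row of a query job.
func printJobRows(ctx context.Context, job *bigquery.Job) error {
	it, err := job.Read(ctx)
	if err != nil {
		return err
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(row)
	}
}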
|
||||
|
||||
// JobStatistics contains statistics about a job.
|
||||
type JobStatistics struct {
|
||||
CreationTime time.Time
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
TotalBytesProcessed int64
|
||||
|
||||
Details Statistics
|
||||
}
|
||||
|
||||
// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
|
||||
type Statistics interface {
|
||||
implementsStatistics()
|
||||
}
|
||||
|
||||
// ExtractStatistics contains statistics about an extract job.
|
||||
type ExtractStatistics struct {
|
||||
// The number of files per destination URI or URI pattern specified in the
|
||||
// extract configuration. These values will be in the same order as the
|
||||
// URIs specified in the 'destinationUris' field.
|
||||
DestinationURIFileCounts []int64
|
||||
}
|
||||
|
||||
// LoadStatistics contains statistics about a load job.
|
||||
type LoadStatistics struct {
|
||||
// The number of bytes of source data in a load job.
|
||||
InputFileBytes int64
|
||||
|
||||
// The number of source files in a load job.
|
||||
InputFiles int64
|
||||
|
||||
// Size of the loaded data in bytes. Note that while a load job is in the
|
||||
// running state, this value may change.
|
||||
OutputBytes int64
|
||||
|
||||
// The number of rows imported in a load job. Note that while an import job is
|
||||
// in the running state, this value may change.
|
||||
OutputRows int64
|
||||
}
|
||||
|
||||
// QueryStatistics contains statistics about a query job.
|
||||
type QueryStatistics struct {
|
||||
// Billing tier for the job.
|
||||
BillingTier int64
|
||||
|
||||
// Whether the query result was fetched from the query cache.
|
||||
CacheHit bool
|
||||
|
||||
// The type of query statement, if valid.
|
||||
StatementType string
|
||||
|
||||
// Total bytes billed for the job.
|
||||
TotalBytesBilled int64
|
||||
|
||||
// Total bytes processed for the job.
|
||||
TotalBytesProcessed int64
|
||||
|
||||
// Describes execution plan for the query.
|
||||
QueryPlan []*ExplainQueryStage
|
||||
|
||||
// The number of rows affected by a DML statement. Present only for DML
|
||||
// statements INSERT, UPDATE or DELETE.
|
||||
NumDMLAffectedRows int64
|
||||
|
||||
// ReferencedTables: [Output-only, Experimental] Referenced tables for
|
||||
// the job. Queries that reference more than 50 tables will not have a
|
||||
// complete list.
|
||||
ReferencedTables []*Table
|
||||
|
||||
// The schema of the results. Present only for successful dry run of
|
||||
// non-legacy SQL queries.
|
||||
Schema Schema
|
||||
|
||||
// Standard SQL: list of undeclared query parameter names detected during a
|
||||
// dry run validation.
|
||||
UndeclaredQueryParameterNames []string
|
||||
}
|
||||
|
||||
// ExplainQueryStage describes one stage of a query.
|
||||
type ExplainQueryStage struct {
|
||||
// Relative amount of the total time the average shard spent on CPU-bound tasks.
|
||||
ComputeRatioAvg float64
|
||||
|
||||
// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
|
||||
ComputeRatioMax float64
|
||||
|
||||
// Unique ID for stage within plan.
|
||||
ID int64
|
||||
|
||||
// Human-readable name for stage.
|
||||
Name string
|
||||
|
||||
// Relative amount of the total time the average shard spent reading input.
|
||||
ReadRatioAvg float64
|
||||
|
||||
// Relative amount of the total time the slowest shard spent reading input.
|
||||
ReadRatioMax float64
|
||||
|
||||
// Number of records read into the stage.
|
||||
RecordsRead int64
|
||||
|
||||
// Number of records written by the stage.
|
||||
RecordsWritten int64
|
||||
|
||||
// Current status for the stage.
|
||||
Status string
|
||||
|
||||
// List of operations within the stage in dependency order (approximately
|
||||
// chronological).
|
||||
Steps []*ExplainQueryStep
|
||||
|
||||
// Relative amount of the total time the average shard spent waiting to be scheduled.
|
||||
WaitRatioAvg float64
|
||||
|
||||
// Relative amount of the total time the slowest shard spent waiting to be scheduled.
|
||||
WaitRatioMax float64
|
||||
|
||||
// Relative amount of the total time the average shard spent on writing output.
|
||||
WriteRatioAvg float64
|
||||
|
||||
// Relative amount of the total time the slowest shard spent on writing output.
|
||||
WriteRatioMax float64
|
||||
}
|
||||
|
||||
// ExplainQueryStep describes one step of a query stage.
|
||||
type ExplainQueryStep struct {
|
||||
// Machine-readable operation type.
|
||||
Kind string
|
||||
|
||||
// Human-readable stage descriptions.
|
||||
Substeps []string
|
||||
}
|
||||
|
||||
func (*ExtractStatistics) implementsStatistics() {}
|
||||
func (*LoadStatistics) implementsStatistics() {}
|
||||
func (*QueryStatistics) implementsStatistics() {}
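A sketch of consuming JobStatistics.Details by type-switching over the three concrete statistics types defined above; the helper name and package are illustrative.

package example

import (
	"fmt"

	"cloud.google.com/go/bigquery"
)

// describeStatistics switches on the concrete Details type, which is one of
// ExtractStatistics, LoadStatistics or QueryStatistics.
func describeStatistics(js *bigquery.JobStatistics) string {
	switch d := js.Details.(type) {
	case *bigquery.QueryStatistics:
		return fmt.Sprintf("query: %d bytes billed, cache hit: %t", d.TotalBytesBilled, d.CacheHit)
	case *bigquery.LoadStatistics:
		return fmt.Sprintf("load: %d rows from %d files", d.OutputRows, d.InputFiles)
	case *bigquery.ExtractStatistics:
		return fmt.Sprintf("extract: %d file counts reported", len(d.DestinationURIFileCounts))
	default:
		return "no details available"
	}
}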
|
||||
|
|
2 vendor/cloud.google.com/go/bigquery/load.go (generated, vendored)
|
@ -82,5 +82,5 @@ func (l *Loader) Run(ctx context.Context) (*Job, error) {
|
|||
|
||||
job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()
|
||||
|
||||
return l.c.service.insertJob(ctx, l.c.projectID, conf)
|
||||
return l.c.insertJob(ctx, conf)
|
||||
}
|
||||
|
|
14 vendor/cloud.google.com/go/bigquery/query.go (generated, vendored)
|
@ -46,7 +46,7 @@ type QueryConfig struct {
|
|||
CreateDisposition TableCreateDisposition
|
||||
|
||||
// WriteDisposition specifies how existing data in the destination table is treated.
|
||||
// The default is WriteAppend.
|
||||
// The default is WriteEmpty.
|
||||
WriteDisposition TableWriteDisposition
|
||||
|
||||
// DisableQueryCache prevents results being fetched from the query cache.
|
||||
|
@ -132,7 +132,7 @@ func (q *Query) Run(ctx context.Context) (*Job, error) {
|
|||
if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
j, err := q.client.service.insertJob(ctx, q.client.projectID, &insertJobConf{job: job})
|
||||
j, err := q.client.insertJob(ctx, &insertJobConf{job: job})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -194,3 +194,13 @@ func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) err
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read submits a query for execution and returns the results via a RowIterator.
|
||||
// It is a shorthand for Query.Run followed by Job.Read.
|
||||
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
|
||||
job, err := q.Run(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return job.Read(ctx)
|
||||
}
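A sketch of the new Query.Read shorthand; client.Query and the trivial SQL text are assumptions made for illustration, and the package name is hypothetical.

package example

import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

// firstValue runs a trivial query and reads back the single result row.
func firstValue(ctx context.Context, client *bigquery.Client) ([]bigquery.Value, error) {
	q := client.Query("SELECT 17 AS x")
	it, err := q.Read(ctx) // Run followed by Job.Read in one call
	if err != nil {
		return nil, err
	}
	var row []bigquery.Value
	if err := it.Next(&row); err != nil && err != iterator.Done {
		return nil, err
	}
	return row, nil
}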
|
||||
|
|
64 vendor/cloud.google.com/go/bigquery/read.go (generated, vendored)
|
@ -1,64 +0,0 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
|
||||
return s.readTabledata(ctx, conf, token)
|
||||
}
|
||||
|
||||
func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc }
|
||||
|
||||
// Read fetches the contents of the table.
|
||||
func (t *Table) Read(ctx context.Context) *RowIterator {
|
||||
return newRowIterator(ctx, t.c.service, &readTableConf{
|
||||
projectID: t.ProjectID,
|
||||
datasetID: t.DatasetID,
|
||||
tableID: t.TableID,
|
||||
})
|
||||
}
|
||||
|
||||
func (conf *readQueryConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
|
||||
return s.readQuery(ctx, conf, token)
|
||||
}
|
||||
|
||||
func (conf *readQueryConf) setPaging(pc *pagingConf) { conf.paging = *pc }
|
||||
|
||||
// Read fetches the results of a query job.
|
||||
// If j is not a query job, Read returns an error.
|
||||
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
|
||||
if !j.isQuery {
|
||||
return nil, errors.New("Cannot read from a non-query job")
|
||||
}
|
||||
return newRowIterator(ctx, j.service, &readQueryConf{
|
||||
projectID: j.projectID,
|
||||
jobID: j.jobID,
|
||||
}), nil
|
||||
}
|
||||
|
||||
// Read submits a query for execution and returns the results via a RowIterator.
|
||||
// It is a shorthand for Query.Run followed by Job.Read.
|
||||
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
|
||||
job, err := q.Run(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return job.Read(ctx)
|
||||
}
|
91 vendor/cloud.google.com/go/bigquery/read_test.go (generated, vendored)
|
@ -20,6 +20,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
|
@ -28,11 +29,6 @@ type readTabledataArgs struct {
|
|||
tok string
|
||||
}
|
||||
|
||||
type readQueryArgs struct {
|
||||
conf *readQueryConf
|
||||
tok string
|
||||
}
|
||||
|
||||
// readServiceStub services read requests by returning data from an in-memory list of values.
|
||||
type readServiceStub struct {
|
||||
// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
|
||||
|
@ -41,7 +37,6 @@ type readServiceStub struct {
|
|||
|
||||
// arguments are recorded for later inspection.
|
||||
readTabledataCalls []readTabledataArgs
|
||||
readQueryCalls []readQueryArgs
|
||||
|
||||
service
|
||||
}
|
||||
|
@ -55,13 +50,13 @@ func (s *readServiceStub) readValues(tok string) *readDataResult {
|
|||
|
||||
return result
|
||||
}
|
||||
func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
|
||||
s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
|
||||
return s.readValues(token), nil
|
||||
|
||||
func (s *readServiceStub) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
|
||||
s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token})
|
||||
func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
|
||||
s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
|
||||
return s.readValues(token), nil
|
||||
}
|
||||
|
||||
|
@ -77,8 +72,13 @@ func TestRead(t *testing.T) {
|
|||
queryJob := &Job{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
service: service,
|
||||
c: c,
|
||||
isQuery: true,
|
||||
destinationTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
}
|
||||
|
||||
for _, readFunc := range []func() *RowIterator{
|
||||
|
@ -159,55 +159,6 @@ func TestNoMoreValues(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// delayedReadStub simulates reading results from a query that has not yet
|
||||
// completed. Its readQuery method initially reports that the query job is not
|
||||
// yet complete. Subsequently, it proxies the request through to another
|
||||
// service stub.
|
||||
type delayedReadStub struct {
|
||||
numDelays int
|
||||
|
||||
readServiceStub
|
||||
}
|
||||
|
||||
func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
|
||||
if s.numDelays > 0 {
|
||||
s.numDelays--
|
||||
return nil, errIncompleteJob
|
||||
}
|
||||
return s.readServiceStub.readQuery(ctx, conf, token)
|
||||
}
|
||||
|
||||
// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete.
|
||||
func TestIncompleteJob(t *testing.T) {
|
||||
service := &delayedReadStub{
|
||||
numDelays: 2,
|
||||
readServiceStub: readServiceStub{
|
||||
values: [][][]Value{{{1, 2}}},
|
||||
},
|
||||
}
|
||||
queryJob := &Job{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
service: service,
|
||||
isQuery: true,
|
||||
}
|
||||
it, err := queryJob.Read(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("err calling Read: %v", err)
|
||||
}
|
||||
var got []Value
|
||||
want := []Value{1, 2}
|
||||
if err := it.Next(&got); err != nil {
|
||||
t.Fatalf("Next: got: %v: want: nil", err)
|
||||
}
|
||||
if service.numDelays != 0 {
|
||||
t.Errorf("remaining numDelays : got: %v want:0", service.numDelays)
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
type errorReadService struct {
|
||||
service
|
||||
}
|
||||
|
@ -272,8 +223,13 @@ func TestReadQueryOptions(t *testing.T) {
|
|||
queryJob := &Job{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
service: s,
|
||||
c: &Client{service: s},
|
||||
isQuery: true,
|
||||
destinationTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
}
|
||||
it, err := queryJob.Read(context.Background())
|
||||
if err != nil {
|
||||
|
@ -285,10 +241,11 @@ func TestReadQueryOptions(t *testing.T) {
|
|||
t.Fatalf("Next: got: %v: want: nil", err)
|
||||
}
|
||||
|
||||
want := []readQueryArgs{{
|
||||
conf: &readQueryConf{
|
||||
want := []readTabledataArgs{{
|
||||
conf: &readTableConf{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
datasetID: "dataset-id",
|
||||
tableID: "table-id",
|
||||
paging: pagingConf{
|
||||
recordsPerRequest: 5,
|
||||
setRecordsPerRequest: true,
|
||||
|
@ -297,7 +254,7 @@ func TestReadQueryOptions(t *testing.T) {
|
|||
tok: "",
|
||||
}}
|
||||
|
||||
if !reflect.DeepEqual(s.readQueryCalls, want) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want)
|
||||
if !reflect.DeepEqual(s.readTabledataCalls, want) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
|
||||
}
|
||||
}
|
||||
|
|
5 vendor/cloud.google.com/go/bigquery/schema.go (generated, vendored)
|
@ -98,6 +98,9 @@ func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
|
|||
}
|
||||
|
||||
func convertTableSchema(ts *bq.TableSchema) Schema {
|
||||
if ts == nil {
|
||||
return nil
|
||||
}
|
||||
var s Schema
|
||||
for _, f := range ts.Fields {
|
||||
s = append(s, convertTableFieldSchema(f))
|
||||
|
@ -283,7 +286,7 @@ func (l *typeList) has(t reflect.Type) bool {
|
|||
// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
|
||||
// via exported fields. (Schema inference ignores unexported fields.)
|
||||
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
|
||||
t = t.Elem()
|
||||
}
|
||||
if t.Kind() != reflect.Struct {
|
||||
|
|
4 vendor/cloud.google.com/go/bigquery/schema_test.go (generated, vendored)
|
@ -769,6 +769,9 @@ func TestHasRecursiveType(t *testing.T) {
|
|||
A int
|
||||
R *rec
|
||||
}
|
||||
recSlicePointer struct {
|
||||
A []*recSlicePointer
|
||||
}
|
||||
)
|
||||
for _, test := range []struct {
|
||||
in interface{}
|
||||
|
@ -780,6 +783,7 @@ func TestHasRecursiveType(t *testing.T) {
|
|||
{rec{}, true},
|
||||
{recUnexported{}, false},
|
||||
{hasRec{}, true},
|
||||
{&recSlicePointer{}, true},
|
||||
} {
|
||||
got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
|
||||
if err != nil {
|
||||
|
|
316 vendor/cloud.google.com/go/bigquery/service.go (generated, vendored)
|
@ -15,7 +15,6 @@
|
|||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
@ -23,6 +22,7 @@ import (
|
|||
"time"
|
||||
|
||||
"cloud.google.com/go/internal"
|
||||
"cloud.google.com/go/internal/version"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
@ -37,7 +37,7 @@ import (
|
|||
type service interface {
|
||||
// Jobs
|
||||
insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error)
|
||||
getJobType(ctx context.Context, projectId, jobID string) (jobType, error)
|
||||
getJob(ctx context.Context, projectId, jobID string) (*Job, error)
|
||||
jobCancel(ctx context.Context, projectId, jobID string) error
|
||||
jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
|
||||
|
||||
|
@ -61,15 +61,19 @@ type service interface {
|
|||
|
||||
// Misc
|
||||
|
||||
// readQuery reads data resulting from a query job. If the job is
|
||||
// incomplete, an errIncompleteJob is returned. readQuery may be called
|
||||
// repeatedly to poll for job completion.
|
||||
readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error)
|
||||
// Waits for a query to complete.
|
||||
waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error)
|
||||
|
||||
// listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated.
|
||||
listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error)
|
||||
}
|
||||
|
||||
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
|
||||
|
||||
func setClientHeader(headers http.Header) {
|
||||
headers.Set("x-goog-api-client", xGoogHeader)
|
||||
}
|
||||
|
||||
type bigqueryService struct {
|
||||
s *bq.Service
|
||||
}
|
||||
|
@ -104,16 +108,43 @@ type insertJobConf struct {
|
|||
media io.Reader
|
||||
}
|
||||
|
||||
// Calls the Jobs.Insert RPC and returns a Job. Callers must set the returned Job's
|
||||
// client.
|
||||
func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
|
||||
call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx)
|
||||
setClientHeader(call.Header())
|
||||
if conf.media != nil {
|
||||
call.Media(conf.media)
|
||||
}
|
||||
res, err := call.Do()
|
||||
var res *bq.Job
|
||||
var err error
|
||||
invoke := func() error {
|
||||
res, err = call.Do()
|
||||
return err
|
||||
}
|
||||
// A job with a client-generated ID can be retried; the presence of the
|
||||
// ID makes the insert operation idempotent.
|
||||
// We don't retry if there is media, because it is an io.Reader. We'd
|
||||
// have to read the contents and keep it in memory, and that could be expensive.
|
||||
// TODO(jba): Look into retrying if media != nil.
|
||||
if conf.job.JobReference != nil && conf.media == nil {
|
||||
err = runWithRetry(ctx, invoke)
|
||||
} else {
|
||||
err = invoke()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil
|
||||
|
||||
var dt *bq.TableReference
|
||||
if qc := res.Configuration.Query; qc != nil {
|
||||
dt = qc.DestinationTable
|
||||
}
|
||||
return &Job{
|
||||
projectID: projectID,
|
||||
jobID: res.JobReference.JobId,
|
||||
destinationTable: dt,
|
||||
}, nil
|
||||
}
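A standalone restatement of the retry decision above, written as if it lived in this package next to runWithRetry; the helper name and signature are illustrative only.

// retryIfIdempotent mirrors the branch above: only calls that are safe to
// repeat (a client-generated job ID, no streaming media body) go through the
// retry wrapper; everything else is invoked exactly once.
func retryIfIdempotent(ctx context.Context, idempotent bool, invoke func() error) error {
	if idempotent {
		return runWithRetry(ctx, invoke)
	}
	return invoke()
}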
|
||||
|
||||
type pagingConf struct {
|
||||
|
@ -129,6 +160,12 @@ type readTableConf struct {
|
|||
schema Schema // lazily initialized when the first page of data is fetched.
|
||||
}
|
||||
|
||||
func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
|
||||
return s.readTabledata(ctx, conf, token)
|
||||
}
|
||||
|
||||
func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc }
|
||||
|
||||
type readDataResult struct {
|
||||
pageToken string
|
||||
rows [][]Value
|
||||
|
@ -136,14 +173,10 @@ type readDataResult struct {
|
|||
schema Schema
|
||||
}
|
||||
|
||||
type readQueryConf struct {
|
||||
projectID, jobID string
|
||||
paging pagingConf
|
||||
}
|
||||
|
||||
func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
|
||||
// Prepare request to fetch one page of table data.
|
||||
req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
|
||||
setClientHeader(req.Header())
|
||||
|
||||
if pageToken != "" {
|
||||
req.PageToken(pageToken)
|
||||
|
@ -195,47 +228,30 @@ func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf
|
|||
return result, nil
|
||||
}
|
||||
|
||||
var errIncompleteJob = errors.New("internal error: query results not available because job is not complete")
|
||||
|
||||
// getQueryResultsTimeout controls the maximum duration of a request to the
|
||||
// BigQuery GetQueryResults endpoint. Setting a long timeout here does not
|
||||
// cause increased overall latency, as results are returned as soon as they are
|
||||
// available.
|
||||
const getQueryResultsTimeout = time.Minute
|
||||
|
||||
func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) {
|
||||
req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID).
|
||||
TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6)
|
||||
|
||||
if pageToken != "" {
|
||||
req.PageToken(pageToken)
|
||||
} else {
|
||||
req.StartIndex(conf.paging.startIndex)
|
||||
func (s *bigqueryService) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
|
||||
// Use GetQueryResults only to wait for completion, not to read results.
|
||||
req := s.s.Jobs.GetQueryResults(projectID, jobID).Context(ctx).MaxResults(0)
|
||||
setClientHeader(req.Header())
|
||||
backoff := gax.Backoff{
|
||||
Initial: 1 * time.Second,
|
||||
Multiplier: 2,
|
||||
Max: 60 * time.Second,
|
||||
}
|
||||
|
||||
if conf.paging.setRecordsPerRequest {
|
||||
req.MaxResults(conf.paging.recordsPerRequest)
|
||||
}
|
||||
|
||||
res, err := req.Context(ctx).Do()
|
||||
var res *bq.GetQueryResultsResponse
|
||||
err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
|
||||
res, err = req.Do()
|
||||
if err != nil {
|
||||
return !retryableError(err), err
|
||||
}
|
||||
if !res.JobComplete { // GetQueryResults may return early without error; retry.
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !res.JobComplete {
|
||||
return nil, errIncompleteJob
|
||||
}
|
||||
schema := convertTableSchema(res.Schema)
|
||||
result := &readDataResult{
|
||||
pageToken: res.PageToken,
|
||||
totalRows: res.TotalRows,
|
||||
schema: schema,
|
||||
}
|
||||
result.rows, err = convertRows(res.Rows, schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
return convertTableSchema(res.Schema), nil
|
||||
}
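A sketch of the same polling shape written directly against gax.Backoff, assuming gax.Sleep and Backoff.Pause behave as in gax-go; the helper and package name are hypothetical.

package example

import (
	"time"

	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
)

// pollUntil waits with exponential backoff between attempts until done
// reports completion or returns an error, or the context ends.
func pollUntil(ctx context.Context, done func() (bool, error)) error {
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Multiplier: 2,
		Max:        60 * time.Second,
	}
	for {
		ok, err := done()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if err := gax.Sleep(ctx, backoff.Pause()); err != nil {
			return err // context cancelled or deadline exceeded
		}
	}
}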
|
||||
|
||||
type insertRowsConf struct {
|
||||
|
@ -263,7 +279,9 @@ func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID,
|
|||
var res *bq.TableDataInsertAllResponse
|
||||
err := runWithRetry(ctx, func() error {
|
||||
var err error
|
||||
res, err = s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx).Do()
|
||||
req := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
res, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -290,37 +308,26 @@ func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID,
|
|||
return errs
|
||||
}
|
||||
|
||||
type jobType int
|
||||
|
||||
const (
|
||||
copyJobType jobType = iota
|
||||
extractJobType
|
||||
loadJobType
|
||||
queryJobType
|
||||
)
|
||||
|
||||
func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) {
|
||||
func (s *bigqueryService) getJob(ctx context.Context, projectID, jobID string) (*Job, error) {
|
||||
res, err := s.s.Jobs.Get(projectID, jobID).
|
||||
Fields("configuration").
|
||||
Context(ctx).
|
||||
Do()
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case res.Configuration.Copy != nil:
|
||||
return copyJobType, nil
|
||||
case res.Configuration.Extract != nil:
|
||||
return extractJobType, nil
|
||||
case res.Configuration.Load != nil:
|
||||
return loadJobType, nil
|
||||
case res.Configuration.Query != nil:
|
||||
return queryJobType, nil
|
||||
default:
|
||||
return 0, errors.New("unknown job type")
|
||||
var isQuery bool
|
||||
var dest *bq.TableReference
|
||||
if res.Configuration.Query != nil {
|
||||
isQuery = true
|
||||
dest = res.Configuration.Query.DestinationTable
|
||||
}
|
||||
return &Job{
|
||||
projectID: projectID,
|
||||
jobID: jobID,
|
||||
isQuery: isQuery,
|
||||
destinationTable: dest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
|
||||
|
@ -338,13 +345,18 @@ func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string
|
|||
|
||||
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
|
||||
res, err := s.s.Jobs.Get(projectID, jobID).
|
||||
Fields("status"). // Only fetch what we need.
|
||||
Fields("status", "statistics"). // Only fetch what we need.
|
||||
Context(ctx).
|
||||
Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return jobStatusFromProto(res.Status)
|
||||
st, err := jobStatusFromProto(res.Status)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
st.Statistics = jobStatisticsFromProto(res.Statistics)
|
||||
return st, nil
|
||||
}
|
||||
|
||||
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
|
||||
|
@ -369,12 +381,87 @@ func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
|
|||
return newStatus, nil
|
||||
}
|
||||
|
||||
func jobStatisticsFromProto(s *bq.JobStatistics) *JobStatistics {
|
||||
js := &JobStatistics{
|
||||
CreationTime: unixMillisToTime(s.CreationTime),
|
||||
StartTime: unixMillisToTime(s.StartTime),
|
||||
EndTime: unixMillisToTime(s.EndTime),
|
||||
TotalBytesProcessed: s.TotalBytesProcessed,
|
||||
}
|
||||
switch {
|
||||
case s.Extract != nil:
|
||||
js.Details = &ExtractStatistics{
|
||||
DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
|
||||
}
|
||||
case s.Load != nil:
|
||||
js.Details = &LoadStatistics{
|
||||
InputFileBytes: s.Load.InputFileBytes,
|
||||
InputFiles: s.Load.InputFiles,
|
||||
OutputBytes: s.Load.OutputBytes,
|
||||
OutputRows: s.Load.OutputRows,
|
||||
}
|
||||
case s.Query != nil:
|
||||
var names []string
|
||||
for _, qp := range s.Query.UndeclaredQueryParameters {
|
||||
names = append(names, qp.Name)
|
||||
}
|
||||
var tables []*Table
|
||||
for _, tr := range s.Query.ReferencedTables {
|
||||
tables = append(tables, convertTableReference(tr))
|
||||
}
|
||||
js.Details = &QueryStatistics{
|
||||
BillingTier: s.Query.BillingTier,
|
||||
CacheHit: s.Query.CacheHit,
|
||||
StatementType: s.Query.StatementType,
|
||||
TotalBytesBilled: s.Query.TotalBytesBilled,
|
||||
TotalBytesProcessed: s.Query.TotalBytesProcessed,
|
||||
NumDMLAffectedRows: s.Query.NumDmlAffectedRows,
|
||||
QueryPlan: queryPlanFromProto(s.Query.QueryPlan),
|
||||
Schema: convertTableSchema(s.Query.Schema),
|
||||
ReferencedTables: tables,
|
||||
UndeclaredQueryParameterNames: names,
|
||||
}
|
||||
}
|
||||
return js
|
||||
}
|
||||
|
||||
func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
|
||||
var res []*ExplainQueryStage
|
||||
for _, s := range stages {
|
||||
var steps []*ExplainQueryStep
|
||||
for _, p := range s.Steps {
|
||||
steps = append(steps, &ExplainQueryStep{
|
||||
Kind: p.Kind,
|
||||
Substeps: p.Substeps,
|
||||
})
|
||||
}
|
||||
res = append(res, &ExplainQueryStage{
|
||||
ComputeRatioAvg: s.ComputeRatioAvg,
|
||||
ComputeRatioMax: s.ComputeRatioMax,
|
||||
ID: s.Id,
|
||||
Name: s.Name,
|
||||
ReadRatioAvg: s.ReadRatioAvg,
|
||||
ReadRatioMax: s.ReadRatioMax,
|
||||
RecordsRead: s.RecordsRead,
|
||||
RecordsWritten: s.RecordsWritten,
|
||||
Status: s.Status,
|
||||
Steps: steps,
|
||||
WaitRatioAvg: s.WaitRatioAvg,
|
||||
WaitRatioMax: s.WaitRatioMax,
|
||||
WriteRatioAvg: s.WriteRatioAvg,
|
||||
WriteRatioMax: s.WriteRatioMax,
|
||||
})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
|
||||
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
|
||||
var tables []*Table
|
||||
req := s.s.Tables.List(projectID, datasetID).
|
||||
PageToken(pageToken).
|
||||
Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
if pageSize > 0 {
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
|
@ -383,7 +470,7 @@ func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID s
|
|||
return nil, "", err
|
||||
}
|
||||
for _, t := range res.Tables {
|
||||
tables = append(tables, s.convertListedTable(t))
|
||||
tables = append(tables, convertTableReference(t.TableReference))
|
||||
}
|
||||
return tables, res.NextPageToken, nil
|
||||
}
|
||||
|
@ -433,12 +520,16 @@ func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf
|
|||
}
|
||||
}
|
||||
|
||||
_, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do()
|
||||
req := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
_, err := req.Do()
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
|
||||
table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do()
|
||||
req := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
table, err := req.Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -446,7 +537,9 @@ func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datas
|
|||
}
|
||||
|
||||
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
|
||||
return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
|
||||
req := s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
return req.Do()
|
||||
}
|
||||
|
||||
func bqTableToMetadata(t *bq.Table) *TableMetadata {
|
||||
|
@ -468,9 +561,17 @@ func bqTableToMetadata(t *bq.Table) *TableMetadata {
|
|||
md.View = t.View.Query
|
||||
}
|
||||
if t.TimePartitioning != nil {
|
||||
md.TimePartitioning = &TimePartitioning{time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond}
|
||||
md.TimePartitioning = &TimePartitioning{
|
||||
Expiration: time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond,
|
||||
}
|
||||
}
|
||||
if t.StreamingBuffer != nil {
|
||||
md.StreamingBuffer = &StreamingBuffer{
|
||||
EstimatedBytes: t.StreamingBuffer.EstimatedBytes,
|
||||
EstimatedRows: t.StreamingBuffer.EstimatedRows,
|
||||
OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
|
||||
}
|
||||
}
|
||||
|
||||
return md
|
||||
}
|
||||
|
||||
|
@ -498,11 +599,11 @@ func unixMillisToTime(m int64) time.Time {
|
|||
return time.Unix(0, m*1e6)
|
||||
}
|
||||
|
||||
func (s *bigqueryService) convertListedTable(t *bq.TableListTables) *Table {
|
||||
func convertTableReference(tr *bq.TableReference) *Table {
|
||||
return &Table{
|
||||
ProjectID: t.TableReference.ProjectId,
|
||||
DatasetID: t.TableReference.DatasetId,
|
||||
TableID: t.TableReference.TableId,
|
||||
ProjectID: tr.ProjectId,
|
||||
DatasetID: tr.DatasetId,
|
||||
TableID: tr.TableId,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -545,16 +646,22 @@ func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectI
|
|||
ds := &bq.Dataset{
|
||||
DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
|
||||
}
|
||||
_, err := s.s.Datasets.Insert(projectID, ds).Context(ctx).Do()
|
||||
req := s.s.Datasets.Insert(projectID, ds).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
_, err := req.Do()
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
|
||||
return s.s.Datasets.Delete(projectID, datasetID).Context(ctx).Do()
|
||||
req := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
return req.Do()
|
||||
}
|
||||
|
||||
func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
|
||||
table, err := s.s.Datasets.Get(projectID, datasetID).Context(ctx).Do()
|
||||
req := s.s.Datasets.Get(projectID, datasetID).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
table, err := req.Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -566,6 +673,7 @@ func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, ma
|
|||
Context(ctx).
|
||||
PageToken(pageToken).
|
||||
All(all)
|
||||
setClientHeader(req.Header())
|
||||
if maxResults > 0 {
|
||||
req.MaxResults(int64(maxResults))
|
||||
}
|
||||
|
@ -605,19 +713,19 @@ func runWithRetry(ctx context.Context, call func() error) error {
|
|||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
e, ok := err.(*googleapi.Error)
|
||||
if !ok {
|
||||
return true, err
|
||||
}
|
||||
var reason string
|
||||
if len(e.Errors) > 0 {
|
||||
reason = e.Errors[0].Reason
|
||||
}
|
||||
// Retry using the criteria in
|
||||
// https://cloud.google.com/bigquery/troubleshooting-errors
|
||||
if reason == "backendError" && (e.Code == 500 || e.Code == 503) {
|
||||
return false, nil
|
||||
}
|
||||
return true, err
|
||||
return !retryableError(err), err
|
||||
})
|
||||
}
|
||||
|
||||
// Use the criteria in https://cloud.google.com/bigquery/troubleshooting-errors.
|
||||
func retryableError(err error) bool {
|
||||
e, ok := err.(*googleapi.Error)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
var reason string
|
||||
if len(e.Errors) > 0 {
|
||||
reason = e.Errors[0].Reason
|
||||
}
|
||||
return reason == "backendError" && (e.Code == 500 || e.Code == 503)
|
||||
}
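A small in-package sketch of the input retryableError treats as transient, using the googleapi error shape it inspects; the helper name is illustrative.

// A 503 with reason "backendError" is retried; any other code/reason is not.
func isTransient() bool {
	err := &googleapi.Error{
		Code:   503,
		Errors: []googleapi.ErrorItem{{Reason: "backendError"}},
	}
	return retryableError(err) // true
}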
|
||||
|
|
83 vendor/cloud.google.com/go/bigquery/service_test.go (generated, vendored, new file)
|
@ -0,0 +1,83 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func TestBQTableToMetadata(t *testing.T) {
|
||||
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
|
||||
aTimeMillis := aTime.UnixNano() / 1e6
|
||||
for _, test := range []struct {
|
||||
in *bq.Table
|
||||
want *TableMetadata
|
||||
}{
|
||||
{&bq.Table{}, &TableMetadata{}}, // test minimal case
|
||||
{
|
||||
&bq.Table{
|
||||
CreationTime: aTimeMillis,
|
||||
Description: "desc",
|
||||
Etag: "etag",
|
||||
ExpirationTime: aTimeMillis,
|
||||
FriendlyName: "fname",
|
||||
Id: "id",
|
||||
LastModifiedTime: uint64(aTimeMillis),
|
||||
Location: "loc",
|
||||
NumBytes: 123,
|
||||
NumLongTermBytes: 23,
|
||||
NumRows: 7,
|
||||
StreamingBuffer: &bq.Streamingbuffer{
|
||||
EstimatedBytes: 11,
|
||||
EstimatedRows: 3,
|
||||
OldestEntryTime: uint64(aTimeMillis),
|
||||
},
|
||||
TimePartitioning: &bq.TimePartitioning{
|
||||
ExpirationMs: 7890,
|
||||
Type: "DAY",
|
||||
},
|
||||
Type: "EXTERNAL",
|
||||
View: &bq.ViewDefinition{Query: "view-query"},
|
||||
},
|
||||
&TableMetadata{
|
||||
Description: "desc",
|
||||
Name: "fname",
|
||||
View: "view-query",
|
||||
ID: "id",
|
||||
Type: ExternalTable,
|
||||
ExpirationTime: aTime.Truncate(time.Millisecond),
|
||||
CreationTime: aTime.Truncate(time.Millisecond),
|
||||
LastModifiedTime: aTime.Truncate(time.Millisecond),
|
||||
NumBytes: 123,
|
||||
NumRows: 7,
|
||||
TimePartitioning: &TimePartitioning{Expiration: time.Duration(7890) * time.Millisecond},
|
||||
StreamingBuffer: &StreamingBuffer{
|
||||
EstimatedBytes: 11,
|
||||
EstimatedRows: 3,
|
||||
OldestEntryTime: aTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
got := bqTableToMetadata(test.in)
|
||||
if !reflect.DeepEqual(got, test.want) {
|
||||
t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
33 vendor/cloud.google.com/go/bigquery/table.go (generated, vendored)
|
@ -64,6 +64,11 @@ type TableMetadata struct {
|
|||
|
||||
// The time-based partitioning settings for this table.
|
||||
TimePartitioning *TimePartitioning
|
||||
|
||||
// Contains information regarding this table's streaming buffer, if one is
|
||||
// present. This field will be nil if the table is not being streamed to or if
|
||||
// there is no data in the streaming buffer.
|
||||
StreamingBuffer *StreamingBuffer
|
||||
}
|
||||
|
||||
// TableCreateDisposition specifies the circumstances under which destination table will be created.
|
||||
|
@ -101,10 +106,25 @@ const (
|
|||
type TableType string
|
||||
|
||||
const (
|
||||
RegularTable TableType = "TABLE"
|
||||
ViewTable TableType = "VIEW"
|
||||
RegularTable TableType = "TABLE"
|
||||
ViewTable TableType = "VIEW"
|
||||
ExternalTable TableType = "EXTERNAL"
|
||||
)
|
||||
|
||||
// StreamingBuffer holds information about the streaming buffer.
|
||||
type StreamingBuffer struct {
|
||||
// A lower-bound estimate of the number of bytes currently in the streaming
|
||||
// buffer.
|
||||
EstimatedBytes uint64
|
||||
|
||||
// A lower-bound estimate of the number of rows currently in the streaming
|
||||
// buffer.
|
||||
EstimatedRows uint64
|
||||
|
||||
// The time of the oldest entry in the streaming buffer.
|
||||
OldestEntryTime time.Time
|
||||
}
|
||||
|
||||
func (t *Table) tableRefProto() *bq.TableReference {
|
||||
return &bq.TableReference{
|
||||
ProjectId: t.ProjectID,
|
||||
|
@ -193,6 +213,15 @@ func (opt TimePartitioning) customizeCreateTable(conf *createTableConf) {
|
|||
conf.timePartitioning = &opt
|
||||
}
|
||||
|
||||
// Read fetches the contents of the table.
|
||||
func (t *Table) Read(ctx context.Context) *RowIterator {
|
||||
return newRowIterator(ctx, t.c.service, &readTableConf{
|
||||
projectID: t.ProjectID,
|
||||
datasetID: t.DatasetID,
|
||||
tableID: t.TableID,
|
||||
})
|
||||
}
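A usage sketch for the relocated Table.Read; the dataset and table IDs are placeholders, iterator.Done is assumed as the end-of-data sentinel, and the package name is illustrative.

package example

import (
	"fmt"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

// dumpTable prints every row of a table using Table.Read.
func dumpTable(ctx context.Context, client *bigquery.Client) error {
	it := client.Dataset("dataset-id").Table("table-id").Read(ctx)
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(row)
	}
}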
|
||||
|
||||
// Update modifies specific Table metadata fields.
|
||||
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) {
|
||||
var conf patchTableConf
|
||||
|
|
19 vendor/cloud.google.com/go/bigquery/uploader.go (generated, vendored)
|
@ -15,6 +15,7 @@
|
|||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
|
@ -114,6 +115,16 @@ func valueSavers(src interface{}) ([]ValueSaver, error) {
|
|||
// Make a ValueSaver from x, which must implement ValueSaver already
|
||||
// or be a struct or pointer to struct.
|
||||
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
|
||||
if _, ok := x.(StructSaver); ok {
|
||||
return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
|
||||
}
|
||||
var insertID string
|
||||
// Handle StructSavers specially so we can infer the schema if necessary.
|
||||
if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
|
||||
x = ss.Struct
|
||||
insertID = ss.InsertID
|
||||
// Fall through so we can infer the schema.
|
||||
}
|
||||
if saver, ok := x.(ValueSaver); ok {
|
||||
return saver, ok, nil
|
||||
}
|
||||
|
@ -128,11 +139,15 @@ func toValueSaver(x interface{}) (ValueSaver, bool, error) {
|
|||
if v.Kind() != reflect.Struct {
|
||||
return nil, false, nil
|
||||
}
|
||||
schema, err := inferSchemaReflect(v.Type())
|
||||
schema, err := inferSchemaReflectCached(v.Type())
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return &StructSaver{Struct: x, Schema: schema}, true, nil
|
||||
return &StructSaver{
|
||||
Struct: x,
|
||||
InsertID: insertID,
|
||||
Schema: schema,
|
||||
}, true, nil
|
||||
}
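A sketch of uploading with an explicit InsertID now that toValueSaver preserves it; the row type, IDs and package name are illustrative assumptions.

package example

import (
	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

type scoreRow struct {
	Name  string
	Count int
}

// uploadRows streams a row with a caller-supplied insert ID. The schema is
// left nil so it is inferred from scoreRow, and the InsertID is preserved.
func uploadRows(ctx context.Context, u *bigquery.Uploader) error {
	savers := []*bigquery.StructSaver{
		{Struct: scoreRow{Name: "a", Count: 1}, InsertID: "row-a"},
	}
	// Note: a StructSaver passed by value is rejected; always pass a pointer.
	return u.Put(ctx, savers)
}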
|
||||
|
||||
func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
|
||||
|
|
34 vendor/cloud.google.com/go/bigquery/uploader_test.go (generated, vendored)
|
@ -35,27 +35,18 @@ func (ts testSaver) Save() (map[string]Value, string, error) {
|
|||
func TestRejectsNonValueSavers(t *testing.T) {
|
||||
client := &Client{projectID: "project-id"}
|
||||
u := Uploader{t: client.Dataset("dataset-id").Table("table-id")}
|
||||
|
||||
testCases := []struct {
|
||||
src interface{}
|
||||
}{
|
||||
{
|
||||
src: 1,
|
||||
},
|
||||
{
|
||||
src: []int{1, 2},
|
||||
},
|
||||
{
|
||||
src: []interface{}{
|
||||
testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
|
||||
1,
|
||||
},
|
||||
inputs := []interface{}{
|
||||
1,
|
||||
[]int{1, 2},
|
||||
[]interface{}{
|
||||
testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
|
||||
1,
|
||||
},
|
||||
StructSaver{},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
if err := u.Put(context.Background(), tc.src); err == nil {
|
||||
t.Errorf("put value: %v; got nil, want error", tc.src)
|
||||
for _, in := range inputs {
|
||||
if err := u.Put(context.Background(), in); err == nil {
|
||||
t.Errorf("put value: %v; got nil, want error", in)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -265,13 +256,16 @@ func TestValueSavers(t *testing.T) {
|
|||
&StructSaver{Schema: schema, Struct: T{I: 1}},
|
||||
&StructSaver{Schema: schema, Struct: &T{I: 2}},
|
||||
}},
|
||||
{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
|
||||
[]ValueSaver{
|
||||
&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
|
||||
}},
|
||||
} {
|
||||
got, err := valueSavers(test.in)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, test.want) {
|
||||
|
||||
t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
|
||||
}
|
||||
// Make sure Save is successful.
|
||||
|
|
20 vendor/cloud.google.com/go/bigquery/value.go (generated, vendored)
|
@ -103,12 +103,20 @@ type structLoaderOp struct {
|
|||
repeated bool
|
||||
}
|
||||
|
||||
var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs")
|
||||
|
||||
func setAny(v reflect.Value, x interface{}) error {
|
||||
if x == nil {
|
||||
return errNoNulls
|
||||
}
|
||||
v.Set(reflect.ValueOf(x))
|
||||
return nil
|
||||
}
|
||||
|
||||
func setInt(v reflect.Value, x interface{}) error {
|
||||
if x == nil {
|
||||
return errNoNulls
|
||||
}
|
||||
xx := x.(int64)
|
||||
if v.OverflowInt(xx) {
|
||||
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
|
||||
|
@ -118,6 +126,9 @@ func setInt(v reflect.Value, x interface{}) error {
|
|||
}
|
||||
|
||||
func setFloat(v reflect.Value, x interface{}) error {
|
||||
if x == nil {
|
||||
return errNoNulls
|
||||
}
|
||||
xx := x.(float64)
|
||||
if v.OverflowFloat(xx) {
|
||||
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
|
||||
|
@ -127,16 +138,25 @@ func setFloat(v reflect.Value, x interface{}) error {
|
|||
}
|
||||
|
||||
func setBool(v reflect.Value, x interface{}) error {
|
||||
if x == nil {
|
||||
return errNoNulls
|
||||
}
|
||||
v.SetBool(x.(bool))
|
||||
return nil
|
||||
}
|
||||
|
||||
func setString(v reflect.Value, x interface{}) error {
|
||||
if x == nil {
|
||||
return errNoNulls
|
||||
}
|
||||
v.SetString(x.(string))
|
||||
return nil
|
||||
}
|
||||
|
||||
func setBytes(v reflect.Value, x interface{}) error {
|
||||
if x == nil {
|
||||
return errNoNulls
|
||||
}
|
||||
v.SetBytes(x.([]byte))
|
||||
return nil
|
||||
}
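The nil checks added above all follow one pattern; a hypothetical setter for another field type, written as if it sat in this file, makes the shape explicit.

// setTimestamp (hypothetical) follows the same shape as the setters above:
// a BigQuery NULL cannot be stored in a plain Go field, so it surfaces as
// errNoNulls rather than silently becoming a zero value.
func setTimestamp(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.Set(reflect.ValueOf(x.(time.Time)))
	return nil
}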
|
||||
|
|
93 vendor/cloud.google.com/go/bigtable/admin.go (generated, vendored)
|
@ -23,6 +23,7 @@ import (
|
|||
|
||||
btopt "cloud.google.com/go/bigtable/internal/option"
|
||||
"cloud.google.com/go/longrunning"
|
||||
lroauto "cloud.google.com/go/longrunning/autogen"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
|
@ -75,7 +76,7 @@ func (ac *AdminClient) instancePrefix() string {
|
|||
|
||||
// Tables returns a list of the tables in the instance.
|
||||
func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ListTablesRequest{
|
||||
Parent: prefix,
|
||||
|
@ -94,7 +95,7 @@ func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
|
|||
// CreateTable creates a new table in the instance.
|
||||
// This method may return before the table's creation is complete.
|
||||
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.CreateTableRequest{
|
||||
Parent: prefix,
|
||||
|
@ -114,7 +115,7 @@ func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, sp
|
|||
for _, split := range split_keys {
|
||||
req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)})
|
||||
}
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.CreateTableRequest{
|
||||
Parent: prefix,
|
||||
|
@ -128,7 +129,7 @@ func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, sp
|
|||
// CreateColumnFamily creates a new column family in a table.
|
||||
func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
|
||||
// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
|
@ -143,7 +144,7 @@ func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family str
|
|||
|
||||
// DeleteTable deletes a table and all of its data.
|
||||
func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.DeleteTableRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
|
@ -154,7 +155,7 @@ func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
|
|||
|
||||
// DeleteColumnFamily deletes a column family in a table and all of its data.
|
||||
func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
|
@ -169,12 +170,20 @@ func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family str
|
|||
|
||||
// TableInfo represents information about a table.
|
||||
type TableInfo struct {
|
||||
// DEPRECATED - This field is deprecated. Please use FamilyInfos instead.
|
||||
Families []string
|
||||
FamilyInfos []FamilyInfo
|
||||
}
|
||||
|
||||
// FamilyInfo represents information about a column family.
|
||||
type FamilyInfo struct {
|
||||
Name string
|
||||
GCPolicy string
|
||||
}
|
||||
|
||||
// TableInfo retrieves information about a table.
|
||||
func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.GetTableRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
|
@ -184,8 +193,9 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo,
|
|||
return nil, err
|
||||
}
|
||||
ti := &TableInfo{}
|
||||
for fam := range res.ColumnFamilies {
|
||||
ti.Families = append(ti.Families, fam)
|
||||
for name, fam := range res.ColumnFamilies {
|
||||
ti.Families = append(ti.Families, name)
|
||||
ti.FamilyInfos = append(ti.FamilyInfos, FamilyInfo{Name: name, GCPolicy: GCRuleToString(fam.GcRule)})
|
||||
}
|
||||
return ti, nil
|
||||
}
|
||||
|
@ -194,7 +204,7 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo,
|
|||
// GC executes opportunistically in the background; table reads may return data
|
||||
// matching the GC policy.
|
||||
func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
|
@ -207,13 +217,26 @@ func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, po
|
|||
return err
|
||||
}
|
||||
|
||||
// DropRowRange permanently deletes a row range from the specified table.
|
||||
func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.DropRowRangeRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte(rowKeyPrefix)},
|
||||
}
|
||||
_, err := ac.tClient.DropRowRange(ctx, req)
|
||||
return err
|
||||
}
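A usage sketch for the new DropRowRange; the table name and key prefix are placeholders, and the package name is illustrative.

package example

import (
	"cloud.google.com/go/bigtable"
	"golang.org/x/net/context"
)

// dropPrefix permanently deletes every row in "mytable" whose key starts with "a".
func dropPrefix(ctx context.Context, ac *bigtable.AdminClient) error {
	return ac.DropRowRange(ctx, "mytable", "a")
}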
|
||||
|
||||
const instanceAdminAddr = "bigtableadmin.googleapis.com:443"
|
||||
|
||||
// InstanceAdminClient is a client type for performing admin operations on instances.
|
||||
// These operations can be substantially more dangerous than those provided by AdminClient.
|
||||
type InstanceAdminClient struct {
|
||||
conn *grpc.ClientConn
|
||||
iClient btapb.BigtableInstanceAdminClient
|
||||
conn *grpc.ClientConn
|
||||
iClient btapb.BigtableInstanceAdminClient
|
||||
lroClient *lroauto.OperationsClient
|
||||
|
||||
project string
|
||||
|
||||
|
@ -232,9 +255,22 @@ func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
|
||||
lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
|
||||
if err != nil {
|
||||
// This error "should not happen", since we are just reusing old connection
|
||||
// and never actually need to dial.
|
||||
// If this does happen, we could leak conn. However, we cannot close conn:
|
||||
// If the user invoked the function with option.WithGRPCConn,
|
||||
// we would close a connection that's still in use.
|
||||
// TODO(pongad): investigate error conditions.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &InstanceAdminClient{
|
||||
conn: conn,
|
||||
iClient: btapb.NewBigtableInstanceAdminClient(conn),
|
||||
conn: conn,
|
||||
iClient: btapb.NewBigtableInstanceAdminClient(conn),
|
||||
lroClient: lroClient,
|
||||
|
||||
project: project,
|
||||
md: metadata.Pairs(resourcePrefixHeader, "projects/"+project),
|
||||
|
@ -279,7 +315,7 @@ var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][
|
|||
// CreateInstance creates a new instance in the project.
|
||||
// This method will return when the instance has been created or when an error occurs.
|
||||
func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
|
||||
ctx = mergeMetadata(ctx, iac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.CreateInstanceRequest{
|
||||
Parent: "projects/" + iac.project,
|
||||
InstanceId: conf.InstanceId,
|
||||
|
@ -298,12 +334,12 @@ func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *Instan
|
|||
return err
|
||||
}
|
||||
resp := btapb.Instance{}
|
||||
return longrunning.InternalNewOperation(iac.conn, lro).Wait(ctx, &resp)
|
||||
return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp)
|
||||
}
|
||||
|
||||
// DeleteInstance deletes an instance from the project.
|
||||
func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
|
||||
ctx = mergeMetadata(ctx, iac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId}
|
||||
_, err := iac.iClient.DeleteInstance(ctx, req)
|
||||
return err
|
||||
|
@ -311,7 +347,7 @@ func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId s
|
|||
|
||||
// Instances returns a list of instances in the project.
|
||||
func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) {
|
||||
ctx = mergeMetadata(ctx, iac.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.ListInstancesRequest{
|
||||
Parent: "projects/" + iac.project,
|
||||
}
|
||||
|
@ -333,3 +369,24 @@ func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo,
|
|||
}
|
||||
return is, nil
|
||||
}
|
||||
|
||||
// InstanceInfo returns information about an instance.
|
||||
func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId string) (*InstanceInfo, error) {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.GetInstanceRequest{
|
||||
Name: "projects/" + iac.project + "/instances/" + instanceId,
|
||||
}
|
||||
res, err := iac.iClient.GetInstance(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := instanceNameRegexp.FindStringSubmatch(res.Name)
|
||||
if m == nil {
|
||||
return nil, fmt.Errorf("malformed instance name %q", res.Name)
|
||||
}
|
||||
return &InstanceInfo{
|
||||
Name: m[2],
|
||||
DisplayName: res.DisplayName,
|
||||
}, nil
|
||||
}
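
For orientation, a minimal usage sketch of the InstanceAdminClient added above, assuming the cloud.google.com/go/bigtable package as vendored in this commit; the project and instance IDs are placeholders, not values from the diff:

    package main

    import (
    	"log"

    	"cloud.google.com/go/bigtable"
    	"golang.org/x/net/context"
    )

    func main() {
    	ctx := context.Background()

    	// "my-project" and "my-instance" are placeholder IDs for illustration.
    	iac, err := bigtable.NewInstanceAdminClient(ctx, "my-project")
    	if err != nil {
    		log.Fatalf("NewInstanceAdminClient: %v", err)
    	}
    	defer iac.Close()

    	// InstanceInfo returns the short name and display name of one instance.
    	info, err := iac.InstanceInfo(ctx, "my-instance")
    	if err != nil {
    		log.Fatalf("InstanceInfo: %v", err)
    	}
    	log.Printf("instance %s (%s)", info.Name, info.DisplayName)
    }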

64 vendor/cloud.google.com/go/bigtable/admin_test.go (generated, vendored)

@ -19,7 +19,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
"golang.org/x/net/context"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func TestAdminIntegration(t *testing.T) {
|
||||
|
@ -41,6 +43,22 @@ func TestAdminIntegration(t *testing.T) {
|
|||
}
|
||||
defer adminClient.Close()
|
||||
|
||||
iAdminClient, err := testEnv.NewInstanceAdminClient()
|
||||
if err != nil {
|
||||
t.Fatalf("NewInstanceAdminClient: %v", err)
|
||||
}
|
||||
if iAdminClient != nil {
|
||||
defer iAdminClient.Close()
|
||||
|
||||
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
|
||||
if err != nil {
|
||||
t.Errorf("InstanceInfo: %v", err)
|
||||
}
|
||||
if iInfo.Name != adminClient.instance {
|
||||
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
|
||||
}
|
||||
}
|
||||
|
||||
list := func() []string {
|
||||
tbls, err := adminClient.Tables(ctx)
|
||||
if err != nil {
|
||||
|
@ -88,4 +106,50 @@ func TestAdminIntegration(t *testing.T) {
|
|||
if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) {
|
||||
t.Errorf("adminClient.Tables return %#v. unwanted %#v", got, unwanted)
|
||||
}
|
||||
|
||||
// Populate mytable and drop row ranges
|
||||
if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
|
||||
client, err := testEnv.NewClient()
|
||||
if err != nil {
|
||||
t.Fatalf("NewClient: %v", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
tbl := client.Open("mytable")
|
||||
|
||||
prefixes := []string{"a", "b", "c"}
|
||||
for _, prefix := range prefixes {
|
||||
for i := 0; i < 5; i++ {
|
||||
mut := NewMutation()
|
||||
mut.Set("cf", "col", 0, []byte("1"))
|
||||
if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil {
|
||||
t.Fatalf("Mutating row: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil {
|
||||
t.Errorf("DropRowRange a: %v", err)
|
||||
}
|
||||
if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil {
|
||||
t.Errorf("DropRowRange c: %v", err)
|
||||
}
|
||||
if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil {
|
||||
t.Errorf("DropRowRange x: %v", err)
|
||||
}
|
||||
|
||||
var gotRowCount int
|
||||
tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
|
||||
gotRowCount += 1
|
||||
if !strings.HasPrefix(row.Key(), "b") {
|
||||
t.Errorf("Invalid row after dropping range: %v", row)
|
||||
}
|
||||
return true
|
||||
})
|
||||
if gotRowCount != 5 {
|
||||
t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
|
||||
}
|
||||
}

73 vendor/cloud.google.com/go/bigtable/bigtable.go (generated, vendored)

@ -73,7 +73,7 @@ func (c *Client) Close() error {
|
|||
}
|
||||
|
||||
var (
|
||||
idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted,codes.Internal}
|
||||
idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted}
|
||||
isIdempotentRetryCode = make(map[codes.Code]bool)
|
||||
retryOptions = []gax.CallOption{
|
||||
gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
|
||||
|
@ -120,7 +120,7 @@ func (c *Client) Open(table string) *Table {
|
|||
// By default, the yielded rows will contain all values in all cells.
|
||||
// Use RowFilter to limit the cells returned.
|
||||
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
|
||||
ctx = mergeMetadata(ctx, t.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, t.md)
|
||||
|
||||
var prevRowKey string
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
|
@ -210,13 +210,16 @@ func decodeFamilyProto(r Row, row string, f *btpb.Family) {
|
|||
}
|
||||
}
|
||||
|
||||
// RowSet is a set of rows to be read. It is satisfied by RowList and RowRange.
|
||||
// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
|
||||
type RowSet interface {
|
||||
proto() *btpb.RowSet
|
||||
|
||||
// retainRowsAfter returns a new RowSet that does not include the
|
||||
// given row key or any row key lexicographically less than it.
|
||||
retainRowsAfter(lastRowKey string) RowSet
|
||||
|
||||
// Valid reports whether this set can cover at least one row.
|
||||
valid() bool
|
||||
}
|
||||
|
||||
// RowList is a sequence of row keys.
|
||||
|
@ -240,6 +243,10 @@ func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
|
|||
return retryKeys
|
||||
}
|
||||
|
||||
func (r RowList) valid() bool {
|
||||
return len(r) > 0
|
||||
}
|
||||
|
||||
// A RowRange is a half-open interval [Start, Limit) encompassing
|
||||
// all the rows with keys at least as large as Start, and less than Limit.
|
||||
// (Bigtable string comparison is the same as Go's.)
|
||||
|
@ -287,7 +294,7 @@ func (r RowRange) proto() *btpb.RowSet {
|
|||
}
|
||||
|
||||
func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
|
||||
if lastRowKey == "" {
|
||||
if lastRowKey == "" || lastRowKey < r.start {
|
||||
return r
|
||||
}
|
||||
// Set the beginning of the range to the row after the last scanned.
|
||||
|
@ -298,6 +305,46 @@ func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
|
|||
return NewRange(start, r.limit)
|
||||
}
|
||||
|
||||
func (r RowRange) valid() bool {
|
||||
return r.start < r.limit
|
||||
}
|
||||
|
||||
// RowRangeList is a sequence of RowRanges representing the union of the ranges.
|
||||
type RowRangeList []RowRange
|
||||
|
||||
func (r RowRangeList) proto() *btpb.RowSet {
|
||||
ranges := make([]*btpb.RowRange, len(r))
|
||||
for i, rr := range r {
|
||||
// RowRange.proto() returns a RowSet with a single element RowRange array
|
||||
ranges[i] = rr.proto().RowRanges[0]
|
||||
}
|
||||
return &btpb.RowSet{RowRanges: ranges}
|
||||
}
|
||||
|
||||
func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet {
|
||||
if lastRowKey == "" {
|
||||
return r
|
||||
}
|
||||
// Return a list of any range that has not yet been completely processed
|
||||
var ranges RowRangeList
|
||||
for _, rr := range r {
|
||||
retained := rr.retainRowsAfter(lastRowKey)
|
||||
if retained.valid() {
|
||||
ranges = append(ranges, retained.(RowRange))
|
||||
}
|
||||
}
|
||||
return ranges
|
||||
}
|
||||
|
||||
func (r RowRangeList) valid() bool {
|
||||
for _, rr := range r {
|
||||
if rr.valid() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SingleRow returns a RowSet for reading a single row.
|
||||
func SingleRow(row string) RowSet {
|
||||
return RowList{row}
|
||||
|
@ -373,7 +420,7 @@ func mutationsAreRetryable(muts []*btpb.Mutation) bool {
|
|||
|
||||
// Apply applies a Mutation to a specific row.
|
||||
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
|
||||
ctx = mergeMetadata(ctx, t.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, t.md)
|
||||
after := func(res proto.Message) {
|
||||
for _, o := range opts {
|
||||
o.after(res)
|
||||
|
@ -535,7 +582,7 @@ type entryErr struct {
|
|||
//
|
||||
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
|
||||
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
|
||||
ctx = mergeMetadata(ctx, t.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, t.md)
|
||||
if len(rowKeys) != len(muts) {
|
||||
return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
|
||||
}
|
||||
|
@ -664,13 +711,13 @@ func (ts Timestamp) TruncateToMilliseconds() Timestamp {
|
|||
if ts == ServerTime {
|
||||
return ts
|
||||
}
|
||||
return ts - ts % 1000
|
||||
return ts - ts%1000
|
||||
}
|
||||
|
||||
// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
|
||||
// It returns the newly written cells.
|
||||
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
|
||||
ctx = mergeMetadata(ctx, t.md)
|
||||
ctx = mergeOutgoingMetadata(ctx, t.md)
|
||||
req := &btpb.ReadModifyWriteRowRequest{
|
||||
TableName: t.c.fullTableName(t.table),
|
||||
RowKey: []byte(row),
|
||||
|
@ -727,9 +774,9 @@ func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
|
|||
})
|
||||
}
|
||||
|
||||
// mergeMetadata returns a context populated by the existing metadata, if any,
|
||||
// joined with internal metadata.
|
||||
func mergeMetadata(ctx context.Context, md metadata.MD) context.Context {
|
||||
mdCopy, _ := metadata.FromContext(ctx)
|
||||
return metadata.NewContext(ctx, metadata.Join(mdCopy, md))
|
||||
// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata,
|
||||
// if any, joined with internal metadata.
|
||||
func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
|
||||
mdCopy, _ := metadata.FromOutgoingContext(ctx)
|
||||
return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
|
||||
}
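
The RowRangeList type added above satisfies RowSet, so it can be passed directly to (*Table).ReadRows. A minimal sketch under that assumption; the package name and key ranges are illustrative only:

    package example

    import (
    	"fmt"

    	"cloud.google.com/go/bigtable"
    	"golang.org/x/net/context"
    )

    // readSomeRanges scans two disjoint key ranges in a single ReadRows call.
    // The ranges here are placeholders for illustration.
    func readSomeRanges(ctx context.Context, tbl *bigtable.Table) error {
    	rr := bigtable.RowRangeList{
    		bigtable.NewRange("a", "d"), // rows in [a, d)
    		bigtable.InfiniteRange("w"), // rows from w to the end of the table
    	}
    	return tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
    		fmt.Println(r.Key())
    		return true // keep scanning
    	})
    }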

11 vendor/cloud.google.com/go/bigtable/bigtable_test.go (generated, vendored)

@ -308,6 +308,17 @@ func TestClientIntegration(t *testing.T) {
|
|||
filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*g.*")),
|
||||
want: "jadams-gwashington-1,jadams-gwashington-1,tjefferson-gwashington-1,tjefferson-gwashington-1",
|
||||
},
|
||||
{
|
||||
desc: "read with a RowRangeList and no filter",
|
||||
rr: RowRangeList{NewRange("gargamel", "hubbard"), InfiniteRange("wmckinley")},
|
||||
want: "gwashington-jadams-1,wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "chain that excludes rows and matches nothing, in a condition",
|
||||
rr: RowRange{},
|
||||
filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil),
|
||||
want: "",
|
||||
},
|
||||
}
|
||||
for _, tc := range readTests {
|
||||
var opts []ReadOption
|
||||

20 vendor/cloud.google.com/go/bigtable/bttest/inmem.go (generated, vendored)

@ -42,6 +42,7 @@ import (
|
|||
"time"
|
||||
|
||||
"bytes"
|
||||
|
||||
emptypb "github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"golang.org/x/net/context"
|
||||
|
@ -118,7 +119,7 @@ func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest)
|
|||
s.mu.Lock()
|
||||
if _, ok := s.tables[tbl]; ok {
|
||||
s.mu.Unlock()
|
||||
return nil, fmt.Errorf("table %q already exists", tbl)
|
||||
return nil, grpc.Errorf(codes.AlreadyExists, "table %q already exists", tbl)
|
||||
}
|
||||
s.tables[tbl] = newTable(req)
|
||||
s.mu.Unlock()
|
||||
|
@ -183,7 +184,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
|
|||
for _, mod := range req.Modifications {
|
||||
if create := mod.GetCreate(); create != nil {
|
||||
if _, ok := tbl.families[mod.Id]; ok {
|
||||
return nil, fmt.Errorf("family %q already exists", mod.Id)
|
||||
return nil, grpc.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id)
|
||||
}
|
||||
newcf := &columnFamily{
|
||||
name: req.Name + "/columnFamilies/" + mod.Id,
|
||||
|
@ -398,7 +399,9 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
|
|||
switch f := f.Filter.(type) {
|
||||
case *btpb.RowFilter_Chain_:
|
||||
for _, sub := range f.Chain.Filters {
|
||||
filterRow(sub, r)
|
||||
if !filterRow(sub, r) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case *btpb.RowFilter_Interleave_:
|
||||
|
@ -593,8 +596,8 @@ func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*bt
|
|||
fs := tbl.columnFamilies()
|
||||
r := tbl.mutableRow(string(req.RowKey))
|
||||
r.mu.Lock()
|
||||
defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if err := applyMutations(tbl, r, req.Mutations, fs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -613,6 +616,7 @@ func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_Mu
|
|||
|
||||
fs := tbl.columnFamilies()
|
||||
|
||||
defer tbl.resortRowIndex()
|
||||
for i, entry := range req.Entries {
|
||||
r := tbl.mutableRow(string(entry.RowKey))
|
||||
r.mu.Lock()
|
||||
|
@ -667,6 +671,7 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate
|
|||
muts = req.TrueMutations
|
||||
}
|
||||
|
||||
defer tbl.resortRowIndex()
|
||||
if err := applyMutations(tbl, r, muts, fs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1004,12 +1009,17 @@ func (t *table) mutableRow(row string) *row {
|
|||
r = newRow(row)
|
||||
t.rowIndex[row] = r
|
||||
t.rows = append(t.rows, r)
|
||||
sort.Sort(byRowKey(t.rows)) // yay, inefficient!
|
||||
}
|
||||
t.mu.Unlock()
|
||||
return r
|
||||
}
|
||||
|
||||
func (t *table) resortRowIndex() {
|
||||
t.mu.Lock()
|
||||
sort.Sort(byRowKey(t.rows))
|
||||
t.mu.Unlock()
|
||||
}
|
||||
|
||||
func (t *table) gc() {
|
||||
// This method doesn't add or remove rows, so we only need a read lock for the table.
|
||||
t.mu.RLock()
|
||||

41 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go (generated, vendored)

@ -48,6 +48,10 @@ var (
|
|||
client *bigtable.Client
|
||||
adminClient *bigtable.AdminClient
|
||||
instanceAdminClient *bigtable.InstanceAdminClient
|
||||
|
||||
version = "<unknown version>"
|
||||
revision = "<unknown revision>"
|
||||
revisionDate = "<unknown revision date>"
|
||||
)
|
||||
|
||||
func getCredentialOpts(opts []option.ClientOption) []option.ClientOption {
|
||||
|
@ -179,6 +183,8 @@ For convenience, values of the -project, -instance, -creds,
|
|||
admin-endpoint = hostname:port
|
||||
data-endpoint = hostname:port
|
||||
All values are optional, and all will be overridden by flags.
|
||||
|
||||
cbt ` + version + ` ` + revision + ` ` + revisionDate + `
|
||||
`
|
||||
|
||||
var commands = []struct {
|
||||
|
@ -202,10 +208,10 @@ var commands = []struct {
|
|||
Required: cbtconfig.ProjectAndInstanceRequired,
|
||||
},
|
||||
{
|
||||
Name: "createtable",
|
||||
Desc: "Create a table",
|
||||
do: doCreateTable,
|
||||
Usage: "cbt createtable <table> [initial_splits...]\n" +
|
||||
Name: "createtable",
|
||||
Desc: "Create a table",
|
||||
do: doCreateTable,
|
||||
Usage: "cbt createtable <table> [initial_splits...]\n" +
|
||||
" initial_splits=row A row key to be used to initially split the table " +
|
||||
"into multiple tablets. Can be repeated to create multiple splits.",
|
||||
Required: cbtconfig.ProjectAndInstanceRequired,
|
||||
|
@ -307,6 +313,13 @@ var commands = []struct {
|
|||
" maxversions=<n> Maximum number of versions to preserve",
|
||||
Required: cbtconfig.ProjectAndInstanceRequired,
|
||||
},
|
||||
{
|
||||
Name: "version",
|
||||
Desc: "Print the current cbt version",
|
||||
do: doVersion,
|
||||
Usage: "cbt version",
|
||||
Required: cbtconfig.NoneRequired,
|
||||
},
|
||||
}
|
||||
|
||||
func doCount(ctx context.Context, args ...string) {
|
||||
|
@ -561,6 +574,12 @@ func (b byColumn) Len() int { return len(b) }
|
|||
func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }
|
||||
|
||||
type byFamilyName []bigtable.FamilyInfo
|
||||
|
||||
func (b byFamilyName) Len() int { return len(b) }
|
||||
func (b byFamilyName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b byFamilyName) Less(i, j int) bool { return b[i].Name < b[j].Name }
|
||||
|
||||
func doLS(ctx context.Context, args ...string) {
|
||||
switch len(args) {
|
||||
default:
|
||||
|
@ -580,10 +599,14 @@ func doLS(ctx context.Context, args ...string) {
|
|||
if err != nil {
|
||||
log.Fatalf("Getting table info: %v", err)
|
||||
}
|
||||
sort.Strings(ti.Families)
|
||||
for _, fam := range ti.Families {
|
||||
fmt.Println(fam)
|
||||
sort.Sort(byFamilyName(ti.FamilyInfos))
|
||||
tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
|
||||
fmt.Fprintf(tw, "Family Name\tGC Policy\n")
|
||||
fmt.Fprintf(tw, "-----------\t---------\n")
|
||||
for _, fam := range ti.FamilyInfos {
|
||||
fmt.Fprintf(tw, "%s\t%s\n", fam.Name, fam.GCPolicy)
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -787,3 +810,7 @@ var unitMap = map[string]time.Duration{
|
|||
"h": time.Hour,
|
||||
"d": 24 * time.Hour,
|
||||
}
|
||||
|
||||
func doVersion(ctx context.Context, args ...string) {
|
||||
fmt.Printf("%s %s %s\n", version, revision, revisionDate)
|
||||
}
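
The reworked doLS output above relies on text/tabwriter for the aligned family / GC-policy table. A standalone sketch of the same pattern with placeholder values:

    package main

    import (
    	"fmt"
    	"os"
    	"text/tabwriter"
    )

    func main() {
    	// Same writer configuration as doLS above: min width 10, tab width 8,
    	// padding 4, padding character '\t'.
    	tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
    	fmt.Fprintf(tw, "Family Name\tGC Policy\n")
    	fmt.Fprintf(tw, "-----------\t---------\n")
    	// "cf" and "versions() > 1" are placeholder column values.
    	fmt.Fprintf(tw, "%s\t%s\n", "cf", "versions() > 1")
    	tw.Flush() // Flush writes the aligned output.
    }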

16 vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go (generated, vendored)

@ -26,6 +26,7 @@ import (
|
|||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -38,7 +39,8 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
|
||||
runFor = flag.Duration("run_for", 5*time.Second,
|
||||
"how long to run the load test for; 0 to run forever until SIGTERM")
|
||||
scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
|
||||
csvOutput = flag.String("csv_output", "",
|
||||
"output path for statistics in .csv format. If this file already exists it will be overwritten.")
|
||||
|
@ -108,13 +110,23 @@ func main() {
|
|||
// Upon a successful run, delete the table. Don't bother checking for errors.
|
||||
defer adminClient.DeleteTable(context.Background(), *scratchTable)
|
||||
|
||||
// Also delete the table on SIGTERM.
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt)
|
||||
go func() {
|
||||
s := <-c
|
||||
log.Printf("Caught %v, cleaning scratch table.", s)
|
||||
adminClient.DeleteTable(context.Background(), *scratchTable)
|
||||
os.Exit(1)
|
||||
}()
|
||||
|
||||
log.Printf("Starting load test... (run for %v)", *runFor)
|
||||
tbl := client.Open(*scratchTable)
|
||||
sem := make(chan int, *reqCount) // limit the number of requests happening at once
|
||||
var reads, writes stats
|
||||
stopTime := time.Now().Add(*runFor)
|
||||
var wg sync.WaitGroup
|
||||
for time.Now().Before(stopTime) {
|
||||
for time.Now().Before(stopTime) || *runFor == 0 {
|
||||
sem <- 1
|
||||
wg.Add(1)
|
||||
go func() {
|
||||

18 vendor/cloud.google.com/go/bigtable/export_test.go (generated, vendored)

@ -64,6 +64,8 @@ type IntegrationTestConfig struct {
|
|||
type IntegrationEnv interface {
|
||||
Config() IntegrationTestConfig
|
||||
NewAdminClient() (*AdminClient, error)
|
||||
// NewInstanceAdminClient will return nil if instance administration is unsupported in this environment
|
||||
NewInstanceAdminClient() (*InstanceAdminClient, error)
|
||||
NewClient() (*Client, error)
|
||||
Close()
|
||||
}
|
||||
|
@ -141,6 +143,11 @@ func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
|
|||
return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
|
||||
}
|
||||
|
||||
// NewInstanceAdminClient returns nil for the emulated environment since the API is not implemented.
|
||||
func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// NewClient builds a new connected data client for this environment
|
||||
func (e *EmulatedEnv) NewClient() (*Client, error) {
|
||||
timeout := 20 * time.Second
|
||||
|
@ -191,6 +198,17 @@ func (e *ProdEnv) NewAdminClient() (*AdminClient, error) {
|
|||
return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...)
|
||||
}
|
||||
|
||||
// NewInstanceAdminClient returns a new connected instance admin client for this environment
|
||||
func (e *ProdEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
|
||||
timeout := 20 * time.Second
|
||||
ctx, _ := context.WithTimeout(context.Background(), timeout)
|
||||
var clientOpts []option.ClientOption
|
||||
if endpoint := e.config.AdminEndpoint; endpoint != "" {
|
||||
clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
|
||||
}
|
||||
return NewInstanceAdminClient(ctx, e.config.Project, clientOpts...)
|
||||
}
|
||||
|
||||
// NewClient builds a connected data client for this environment
|
||||
func (e *ProdEnv) NewClient() (*Client, error) {
|
||||
timeout := 20 * time.Second
|
||||

22 vendor/cloud.google.com/go/bigtable/filter.go (generated, vendored)

@ -154,7 +154,7 @@ func (stripValueFilter) proto() *btpb.RowFilter {
|
|||
return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}}
|
||||
}
|
||||
|
||||
// TimestampRangeFilter returns a filter that matches any rows whose timestamp is within the given time bounds. A zero
|
||||
// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero
|
||||
// time means no bound.
|
||||
// The timestamp will be truncated to millisecond granularity.
|
||||
func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter {
|
||||
|
@ -168,7 +168,7 @@ func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter {
|
|||
return trf
|
||||
}
|
||||
|
||||
// TimestampRangeFilterMicros returns a filter that matches any rows whose timestamp is within the given time bounds,
|
||||
// TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds,
|
||||
// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound.
|
||||
// The timestamp will be truncated to millisecond granularity.
|
||||
func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter {
|
||||
|
@ -187,10 +187,10 @@ func (trf timestampRangeFilter) String() string {
|
|||
func (trf timestampRangeFilter) proto() *btpb.RowFilter {
|
||||
return &btpb.RowFilter{
|
||||
Filter: &btpb.RowFilter_TimestampRangeFilter{
|
||||
&btpb.TimestampRange{
|
||||
int64(trf.startTime.TruncateToMilliseconds()),
|
||||
int64(trf.endTime.TruncateToMilliseconds()),
|
||||
},
|
||||
&btpb.TimestampRange{
|
||||
int64(trf.startTime.TruncateToMilliseconds()),
|
||||
int64(trf.endTime.TruncateToMilliseconds()),
|
||||
},
|
||||
}}
|
||||
}
|
||||
|
||||
|
@ -228,8 +228,8 @@ func ValueRangeFilter(start, end []byte) Filter {
|
|||
}
|
||||
|
||||
type valueRangeFilter struct {
|
||||
start []byte
|
||||
end []byte
|
||||
start []byte
|
||||
end []byte
|
||||
}
|
||||
|
||||
func (vrf valueRangeFilter) String() string {
|
||||
|
@ -260,8 +260,8 @@ func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter {
|
|||
|
||||
type conditionFilter struct {
|
||||
predicateFilter Filter
|
||||
trueFilter Filter
|
||||
falseFilter Filter
|
||||
trueFilter Filter
|
||||
falseFilter Filter
|
||||
}
|
||||
|
||||
func (cf conditionFilter) String() string {
|
||||
|
@ -282,7 +282,7 @@ func (cf conditionFilter) proto() *btpb.RowFilter {
|
|||
cf.predicateFilter.proto(),
|
||||
tf,
|
||||
ff,
|
||||
}}}
|
||||
}}}
|
||||
}
|
||||
|
||||
// TODO(dsymonds): More filters: sampling
|
||||

27 vendor/cloud.google.com/go/bigtable/gc.go (generated, vendored)

@@ -129,3 +129,30 @@ func (ma maxAgePolicy) proto() *bttdpb.GcRule {
 		}},
 	}
 }
+
+// GCRuleToString converts the given GcRule proto to a user-visible string.
+func GCRuleToString(rule *bttdpb.GcRule) string {
+	if rule == nil {
+		return "<default>"
+	}
+	var ruleStr string
+	if r, ok := rule.Rule.(*bttdpb.GcRule_MaxNumVersions); ok {
+		ruleStr += MaxVersionsPolicy(int(r.MaxNumVersions)).String()
+	} else if r, ok := rule.Rule.(*bttdpb.GcRule_MaxAge); ok {
+		ruleStr += MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
+	} else if r, ok := rule.Rule.(*bttdpb.GcRule_Intersection_); ok {
+		var chunks []string
+		for _, intRule := range r.Intersection.Rules {
+			chunks = append(chunks, GCRuleToString(intRule))
+		}
+		ruleStr += "(" + strings.Join(chunks, " && ") + ")"
+	} else if r, ok := rule.Rule.(*bttdpb.GcRule_Union_); ok {
+		var chunks []string
+		for _, unionRule := range r.Union.Rules {
+			chunks = append(chunks, GCRuleToString(unionRule))
+		}
+		ruleStr += "(" + strings.Join(chunks, " || ") + ")"
+	}
+
+	return ruleStr
+}

46 vendor/cloud.google.com/go/bigtable/gc_test.go (generated, vendored, new file)

@@ -0,0 +1,46 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package bigtable
+
+import (
+	"testing"
+	"time"
+
+	bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
+)
+
+func TestGcRuleToString(t *testing.T) {
+
+	intersection := IntersectionPolicy(MaxVersionsPolicy(5), MaxVersionsPolicy(10), MaxAgePolicy(16*time.Hour))
+
+	var tests = []struct {
+		proto *bttdpb.GcRule
+		want  string
+	}{
+		{MaxAgePolicy(72 * time.Hour).proto(), "age() > 3d"},
+		{MaxVersionsPolicy(5).proto(), "versions() > 5"},
+		{intersection.proto(), "(versions() > 5 && versions() > 10 && age() > 16h)"},
+		{UnionPolicy(intersection, MaxAgePolicy(72*time.Hour)).proto(),
+			"((versions() > 5 && versions() > 10 && age() > 16h) || age() > 3d)"},
+	}
+
+	for _, test := range tests {
+		got := GCRuleToString(test.proto)
+		if got != test.want {
+			t.Errorf("got gc rule string: %v, wanted: %v", got, test.want)
+		}
+	}
+}

2 vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go (generated, vendored)

@@ -38,7 +38,7 @@ func TestRandomizedDelays(t *testing.T) {
 		// Keep failing, make sure we never slept more than max (plus a fudge factor)
 		if !invokeTime.IsZero() {
 			if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) {
-				t.Fatalf("Slept too long. Got: %v, want: %v", got, max)
+				t.Logf("Slept too long. Got: %v, want: %v", got, max)
 			}
 		}
 		invokeTime = time.Now()

8 vendor/cloud.google.com/go/bigtable/retry_test.go (generated, vendored)

@@ -277,6 +277,14 @@ func TestRetainRowsAfter(t *testing.T) {
 		t.Errorf("range retry: got %v, want %v", got, want)
 	}
 
+	prevRowRangeList := RowRangeList{NewRange("a", "d"), NewRange("e", "g"), NewRange("h", "l")}
+	prevRowKey = "f"
+	wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")}
+	got = prevRowRangeList.retainRowsAfter(prevRowKey)
+	if !reflect.DeepEqual(wantRowRangeList, got) {
+		t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList)
+	}
+
 	prevRowList := RowList{"a", "b", "c", "d", "e", "f"}
 	prevRowKey = "b"
 	wantList := RowList{"c", "d", "e", "f"}

16 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go (generated, vendored)

@ -60,6 +60,10 @@ type Controller struct {
|
|||
options Options
|
||||
uniquifier string
|
||||
description string
|
||||
// labels are included when registering the debuggee. They should contain
|
||||
// the module name, version and minorversion, and are used by the debug UI
|
||||
// to label the correct version active for debugging.
|
||||
labels map[string]string
|
||||
// mu protects debuggeeID
|
||||
mu sync.Mutex
|
||||
// debuggeeID is returned from the server on registration, and is passed back
|
||||
|
@ -133,16 +137,18 @@ func NewController(ctx context.Context, o Options) (*Controller, error) {
|
|||
scJSON = nil
|
||||
o.SourceContexts = nil
|
||||
}
|
||||
const minorversion = "107157" // any arbitrary numeric string
|
||||
|
||||
// Compute a uniquifier string by hashing the project number, app module name,
|
||||
// app module version, debuglet version, and source context.
|
||||
// The choice of hash function is arbitrary.
|
||||
h := sha256.Sum256([]byte(fmt.Sprintf("%d %s %d %s %d %s %d %s %d %s",
|
||||
h := sha256.Sum256([]byte(fmt.Sprintf("%d %s %d %s %d %s %d %s %d %s %d %s",
|
||||
len(o.ProjectNumber), o.ProjectNumber,
|
||||
len(o.AppModule), o.AppModule,
|
||||
len(o.AppVersion), o.AppVersion,
|
||||
len(agentVersionString), agentVersionString,
|
||||
len(scJSON), scJSON)))
|
||||
len(scJSON), scJSON,
|
||||
len(minorversion), minorversion)))
|
||||
uniquifier := fmt.Sprintf("%X", h[0:16]) // 32 hex characters
|
||||
|
||||
description := o.ProjectID
|
||||
|
@ -166,6 +172,11 @@ func NewController(ctx context.Context, o Options) (*Controller, error) {
|
|||
options: o,
|
||||
uniquifier: uniquifier,
|
||||
description: description,
|
||||
labels: map[string]string{
|
||||
"module": o.AppModule,
|
||||
"version": o.AppVersion,
|
||||
"minorversion": minorversion,
|
||||
},
|
||||
}
|
||||
|
||||
return c, nil
|
||||
|
@ -256,6 +267,7 @@ func (c *Controller) register(ctx context.Context) error {
|
|||
Project: c.options.ProjectNumber,
|
||||
SourceContexts: c.options.SourceContexts,
|
||||
Uniquifier: c.uniquifier,
|
||||
Labels: c.labels,
|
||||
},
|
||||
}
|
||||
resp, err := c.s.Register(ctx, &req)
|
||||

36 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go (generated, vendored)

@ -15,6 +15,10 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
@ -168,6 +172,9 @@ func TestDebugletControllerClientLibrary(t *testing.T) {
|
|||
if c, err = NewController(ctx, opts); err != nil {
|
||||
t.Fatal("Initializing Controller client:", err)
|
||||
}
|
||||
if err := validateLabels(c, opts); err != nil {
|
||||
t.Fatalf("Invalid labels:\n%v", err)
|
||||
}
|
||||
if list, err = c.List(ctx); err != nil {
|
||||
t.Fatal("List:", err)
|
||||
}
|
||||
|
@ -208,6 +215,35 @@ func TestDebugletControllerClientLibrary(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func validateLabels(c *Controller, o Options) error {
|
||||
errMsg := new(bytes.Buffer)
|
||||
if m, ok := c.labels["module"]; ok {
|
||||
if m != o.AppModule {
|
||||
errMsg.WriteString(fmt.Sprintf("label module: want %s, got %s\n", o.AppModule, m))
|
||||
}
|
||||
} else {
|
||||
errMsg.WriteString("Missing \"module\" label\n")
|
||||
}
|
||||
if v, ok := c.labels["version"]; ok {
|
||||
if v != o.AppVersion {
|
||||
errMsg.WriteString(fmt.Sprintf("label version: want %s, got %s\n", o.AppVersion, v))
|
||||
}
|
||||
} else {
|
||||
errMsg.WriteString("Missing \"version\" label\n")
|
||||
}
|
||||
if mv, ok := c.labels["minorversion"]; ok {
|
||||
if _, err := strconv.Atoi(mv); err != nil {
|
||||
errMsg.WriteString(fmt.Sprintln("label minorversion: not a numeric string:", mv))
|
||||
}
|
||||
} else {
|
||||
errMsg.WriteString("Missing \"minorversion\" label\n")
|
||||
}
|
||||
if errMsg.Len() != 0 {
|
||||
return errors.New(errMsg.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestIsAbortedError(t *testing.T) {
|
||||
if !isAbortedError(abortedError) {
|
||||
t.Errorf("isAborted(%+v): got false, want true", abortedError)
|
||||

35 vendor/cloud.google.com/go/compute/metadata/metadata.go (generated, vendored)

@ -34,8 +34,6 @@ import (
|
|||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
|
||||
"cloud.google.com/go/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -48,6 +46,8 @@ const (
|
|||
// This is variable name is not defined by any spec, as far as
|
||||
// I know; it was made up for the Go package.
|
||||
metadataHostEnv = "GCE_METADATA_HOST"
|
||||
|
||||
userAgent = "gcloud-golang/0.1"
|
||||
)
|
||||
|
||||
type cachedValue struct {
|
||||
|
@ -65,24 +65,20 @@ var (
|
|||
|
||||
var (
|
||||
metaClient = &http.Client{
|
||||
Transport: &internal.Transport{
|
||||
Base: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: 2 * time.Second,
|
||||
},
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: 2 * time.Second,
|
||||
},
|
||||
}
|
||||
subscribeClient = &http.Client{
|
||||
Transport: &internal.Transport{
|
||||
Base: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
@ -132,6 +128,7 @@ func getETag(client *http.Client, suffix string) (value, etag string, err error)
|
|||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
|
@ -202,7 +199,9 @@ func testOnGCE() bool {
|
|||
// Try two strategies in parallel.
|
||||
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
|
||||
go func() {
|
||||
res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
|
||||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := ctxhttp.Do(ctx, metaClient, req)
|
||||
if err != nil {
|
||||
resc <- false
|
||||
return
|
||||

12 vendor/cloud.google.com/go/container/container.go (generated, vendored)

@@ -12,15 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package container contains a Google Container Engine client.
+// Package container contains a deprecated Google Container Engine client.
 //
-// For more information about the API,
-// see https://cloud.google.com/container-engine/docs
-//
-// Authentication
-//
-// See examples of authorization and authentication at
-// https://godoc.org/cloud.google.com/go#pkg-examples.
+// Deprecated: Use google.golang.org/api/container instead.
 package container // import "cloud.google.com/go/container"
 
 import (

@@ -153,7 +147,7 @@ func resourceFromRaw(c *raw.Cluster) *Resource {
 		Description: c.Description,
 		Zone:        c.Zone,
 		Status:      Status(c.Status),
-		Num:         c.InitialNodeCount,
+		Num:         c.CurrentNodeCount,
 		APIVersion:  c.InitialClusterVersion,
 		Endpoint:    c.Endpoint,
 		Username:    c.MasterAuth.Username,

26 vendor/cloud.google.com/go/datastore/datastore.go (generated, vendored)

@ -53,6 +53,10 @@ type protoClient interface {
|
|||
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
|
||||
// metadata to be sent in each request for server-side traffic management.
|
||||
type datastoreClient struct {
|
||||
// Embed so we still implement the DatastoreClient interface,
|
||||
// if the interface adds more methods.
|
||||
pb.DatastoreClient
|
||||
|
||||
c pb.DatastoreClient
|
||||
md metadata.MD
|
||||
}
|
||||
|
@ -67,27 +71,27 @@ func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreCli
|
|||
}
|
||||
|
||||
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
|
||||
return dc.c.Lookup(metadata.NewContext(ctx, dc.md), in, opts...)
|
||||
return dc.c.Lookup(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
|
||||
}
|
||||
|
||||
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
|
||||
return dc.c.RunQuery(metadata.NewContext(ctx, dc.md), in, opts...)
|
||||
return dc.c.RunQuery(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
|
||||
}
|
||||
|
||||
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
|
||||
return dc.c.BeginTransaction(metadata.NewContext(ctx, dc.md), in, opts...)
|
||||
return dc.c.BeginTransaction(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
|
||||
}
|
||||
|
||||
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
|
||||
return dc.c.Commit(metadata.NewContext(ctx, dc.md), in, opts...)
|
||||
return dc.c.Commit(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
|
||||
}
|
||||
|
||||
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
|
||||
return dc.c.Rollback(metadata.NewContext(ctx, dc.md), in, opts...)
|
||||
return dc.c.Rollback(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
|
||||
}
|
||||
|
||||
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
|
||||
return dc.c.AllocateIds(metadata.NewContext(ctx, dc.md), in, opts...)
|
||||
return dc.c.AllocateIds(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
|
||||
}
|
||||
|
||||
// Client is a client for reading and writing data in a datastore dataset.
|
||||
|
@ -202,9 +206,9 @@ func keyToProto(k *Key) *pb.Key {
|
|||
for {
|
||||
el := &pb.Key_PathElement{Kind: k.Kind}
|
||||
if k.ID != 0 {
|
||||
el.IdType = &pb.Key_PathElement_Id{k.ID}
|
||||
el.IdType = &pb.Key_PathElement_Id{Id: k.ID}
|
||||
} else if k.Name != "" {
|
||||
el.IdType = &pb.Key_PathElement_Name{k.Name}
|
||||
el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
|
||||
}
|
||||
path = append([]*pb.Key_PathElement{el}, path...)
|
||||
if k.Parent == nil {
|
||||
|
@ -549,9 +553,9 @@ func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) {
|
|||
}
|
||||
var mut *pb.Mutation
|
||||
if k.Incomplete() {
|
||||
mut = &pb.Mutation{Operation: &pb.Mutation_Insert{p}}
|
||||
mut = &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}}
|
||||
} else {
|
||||
mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{p}}
|
||||
mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}}
|
||||
}
|
||||
mutations = append(mutations, mut)
|
||||
}
|
||||
|
@ -593,7 +597,7 @@ func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
|
|||
return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
|
||||
}
|
||||
mutations = append(mutations, &pb.Mutation{
|
||||
Operation: &pb.Mutation_Delete{keyToProto(k)},
|
||||
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
|
||||
})
|
||||
}
|
||||
return mutations, nil
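
These datastore changes, like the bigtable ones earlier in the commit, switch from metadata.NewContext to the outgoing-metadata API of google.golang.org/grpc/metadata. A minimal sketch of that pattern; the package name and header name are illustrative, not the library's actual constants:

    package example

    import (
    	"golang.org/x/net/context"
    	"google.golang.org/grpc/metadata"
    )

    // withResourcePrefix attaches a resource-prefix header to the outgoing
    // gRPC metadata, merging with any metadata already on the context.
    // The header key below is an assumed example, not a value from the diff.
    func withResourcePrefix(ctx context.Context, resource string) context.Context {
    	md := metadata.Pairs("google-cloud-resource-prefix", resource)
    	if existing, ok := metadata.FromOutgoingContext(ctx); ok {
    		md = metadata.Join(existing, md)
    	}
    	return metadata.NewOutgoingContext(ctx, md)
    }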

845 vendor/cloud.google.com/go/datastore/datastore_test.go (generated, vendored)

@ -1954,7 +1954,7 @@ func TestRoundTrip(t *testing.T) {
|
|||
equal = reflect.DeepEqual(got, tc.want)
|
||||
}
|
||||
if !equal {
|
||||
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want)
|
||||
t.Errorf("%s: compare:\ngot: %+#v\nwant: %+#v", tc.desc, got, tc.want)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -1986,6 +1986,19 @@ func (pls aValuePLS) Save() ([]Property, error) {
|
|||
return []Property{{Name: "Count", Value: 8}}, nil
|
||||
}
|
||||
|
||||
type aValuePtrPLS struct {
|
||||
Count int
|
||||
}
|
||||
|
||||
func (pls *aValuePtrPLS) Load([]Property) error {
|
||||
pls.Count = 11
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pls *aValuePtrPLS) Save() ([]Property, error) {
|
||||
return []Property{{Name: "Count", Value: 12}}, nil
|
||||
}
|
||||
|
||||
type aNotPLS struct {
|
||||
Count int
|
||||
}
|
||||
|
@ -2001,15 +2014,21 @@ func (s *plsString) Save() ([]Property, error) {
|
|||
return []Property{{Name: "SS", Value: "SAVED"}}, nil
|
||||
}
|
||||
|
||||
func ptrToplsString(s string) *plsString {
|
||||
plsStr := plsString(s)
|
||||
return &plsStr
|
||||
}
|
||||
|
||||
type aSubPLS struct {
|
||||
Foo string
|
||||
Bar *aPtrPLS
|
||||
Baz aValuePtrPLS
|
||||
S plsString
|
||||
}
|
||||
|
||||
type aSubNotPLS struct {
|
||||
Foo string
|
||||
Bar *aNotPLS
|
||||
S plsString `datastore:",omitempty"`
|
||||
}
|
||||
|
||||
type aSubPLSErr struct {
|
||||
|
@ -2017,77 +2036,203 @@ type aSubPLSErr struct {
|
|||
Bar aValuePLS
|
||||
}
|
||||
|
||||
func TestLoadSaveNestedStructPLS(t *testing.T) {
|
||||
type aSubPLSNoErr struct {
|
||||
Foo string
|
||||
Bar aPtrPLS
|
||||
}
|
||||
|
||||
type GrandparentFlatten struct {
|
||||
Parent Parent `datastore:",flatten"`
|
||||
}
|
||||
|
||||
type GrandparentOfPtrFlatten struct {
|
||||
Parent ParentOfPtr `datastore:",flatten"`
|
||||
}
|
||||
|
||||
type GrandparentOfSlice struct {
|
||||
Parent ParentOfSlice
|
||||
}
|
||||
|
||||
type GrandparentOfSlicePtrs struct {
|
||||
Parent ParentOfSlicePtrs
|
||||
}
|
||||
|
||||
type GrandparentOfSliceFlatten struct {
|
||||
Parent ParentOfSlice `datastore:",flatten"`
|
||||
}
|
||||
|
||||
type GrandparentOfSlicePtrsFlatten struct {
|
||||
Parent ParentOfSlicePtrs `datastore:",flatten"`
|
||||
}
|
||||
|
||||
type Grandparent struct {
|
||||
Parent Parent
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Child Child
|
||||
String plsString
|
||||
}
|
||||
|
||||
type ParentOfPtr struct {
|
||||
Child *Child
|
||||
String *plsString
|
||||
}
|
||||
|
||||
type ParentOfSlice struct {
|
||||
Children []Child
|
||||
Strings []plsString
|
||||
}
|
||||
|
||||
type ParentOfSlicePtrs struct {
|
||||
Children []*Child
|
||||
Strings []*plsString
|
||||
}
|
||||
|
||||
type Child struct {
|
||||
I int
|
||||
Grandchild Grandchild
|
||||
}
|
||||
|
||||
type Grandchild struct {
|
||||
S string
|
||||
}
|
||||
|
||||
func (c *Child) Load(props []Property) error {
|
||||
for _, p := range props {
|
||||
if p.Name == "I" {
|
||||
c.I += 1
|
||||
} else if p.Name == "Grandchild.S" {
|
||||
c.Grandchild.S = "grandchild loaded"
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Child) Save() ([]Property, error) {
|
||||
v := c.I + 1
|
||||
return []Property{
|
||||
{Name: "I", Value: v},
|
||||
{Name: "Grandchild.S", Value: fmt.Sprintf("grandchild saved %d", v)},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestLoadSavePLS(t *testing.T) {
|
||||
type testCase struct {
|
||||
desc string
|
||||
src interface{}
|
||||
wantSave *pb.Entity
|
||||
wantLoad interface{}
|
||||
saveErr string
|
||||
loadErr string
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
desc: "substruct (ptr) does implement PLS",
|
||||
src: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 2}},
|
||||
desc: "non-struct implements PLS (top-level)",
|
||||
src: ptrToplsString("hello"),
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Foo": {ValueType: &pb.Value_StringValue{"foo"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
wantLoad: ptrToplsString("LOADED"),
|
||||
},
|
||||
{
|
||||
desc: "substructs do implement PLS",
|
||||
src: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 2}, Baz: aValuePtrPLS{Count: 15}, S: "something"},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}},
|
||||
"Bar": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"Count": {ValueType: &pb.Value_IntegerValue{4}},
|
||||
"Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"Baz": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"S": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
// PLS impl for 'S' not used, not entity.
|
||||
wantLoad: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 1}},
|
||||
wantLoad: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 1}, Baz: aValuePtrPLS{Count: 11}, S: "LOADED"},
|
||||
},
|
||||
{
|
||||
desc: "substruct (ptr) does implement PLS, nil valued substruct",
|
||||
src: &aSubPLS{Foo: "foo"},
|
||||
src: &aSubPLS{Foo: "foo", S: "something"},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Foo": {ValueType: &pb.Value_StringValue{"foo"}},
|
||||
},
|
||||
},
|
||||
wantLoad: &aSubPLS{Foo: "foo"},
|
||||
},
|
||||
{
|
||||
desc: "substruct (ptr) does not implement PLS",
|
||||
src: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}, S: "something"},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Foo": {ValueType: &pb.Value_StringValue{"foo"}},
|
||||
"Bar": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
"Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}},
|
||||
"Baz": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"Count": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"S": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
// PLS impl for 'S' not used, not entity.
|
||||
"S": {ValueType: &pb.Value_StringValue{"something"}},
|
||||
},
|
||||
},
|
||||
wantLoad: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}, S: "something"},
|
||||
wantLoad: &aSubPLS{Foo: "foo", Baz: aValuePtrPLS{Count: 11}, S: "LOADED"},
|
||||
},
|
||||
{
|
||||
desc: "substruct (value) does implement PLS, error",
|
||||
src: &aSubPLSErr{Foo: "foo", Bar: aValuePLS{Count: 3}},
|
||||
desc: "substruct (ptr) does not implement PLS",
|
||||
src: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Foo": {ValueType: &pb.Value_StringValue{"foo"}},
|
||||
"Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}},
|
||||
"Bar": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"Count": {ValueType: &pb.Value_IntegerValue{8}},
|
||||
"Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
wantLoad: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}},
|
||||
},
|
||||
{
|
||||
desc: "substruct (value) does implement PLS, error on save",
|
||||
src: &aSubPLSErr{Foo: "foo", Bar: aValuePLS{Count: 2}},
|
||||
wantSave: (*pb.Entity)(nil),
|
||||
wantLoad: &aSubPLSErr{},
|
||||
saveErr: "PropertyLoadSaver methods must be implemented on a pointer",
|
||||
},
|
||||
{
|
||||
desc: "substruct (value) does implement PLS, error on load",
|
||||
src: &aSubPLSNoErr{Foo: "foo", Bar: aPtrPLS{Count: 2}},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}},
|
||||
"Bar": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
|
@ -2096,45 +2241,508 @@ func TestLoadSaveNestedStructPLS(t *testing.T) {
|
|||
wantLoad: &aSubPLSErr{},
|
||||
loadErr: "PropertyLoadSaver methods must be implemented on a pointer",
|
||||
},
|
||||
|
||||
{
|
||||
desc: "parent does not have flatten option, child impl PLS",
|
||||
src: &Grandparent{
|
||||
Parent: Parent{
|
||||
Child: Child{
|
||||
I: 9,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD",
|
||||
},
|
||||
},
|
||||
String: plsString("something"),
|
||||
},
|
||||
},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Parent": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"Child": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}},
|
||||
"Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"String": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
wantLoad: &Grandparent{
|
||||
Parent: Parent{
|
||||
Child: Child{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
String: "LOADED",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "parent has flatten option enabled, child impl PLS",
|
||||
src: &GrandparentFlatten{
|
||||
Parent: Parent{
|
||||
Child: Child{
|
||||
I: 7,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD",
|
||||
},
|
||||
},
|
||||
String: plsString("something"),
|
||||
},
|
||||
},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Parent.Child.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}},
|
||||
"Parent.Child.Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}},
|
||||
"Parent.String.SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
wantLoad: &GrandparentFlatten{
|
||||
Parent: Parent{
|
||||
Child: Child{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
String: "LOADED",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
desc: "parent has flatten option enabled, child (ptr to) impl PLS",
|
||||
src: &GrandparentOfPtrFlatten{
|
||||
Parent: ParentOfPtr{
|
||||
Child: &Child{
|
||||
I: 7,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD",
|
||||
},
|
||||
},
|
||||
String: ptrToplsString("something"),
|
||||
},
|
||||
},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Parent.Child.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}},
|
||||
"Parent.Child.Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}},
|
||||
"Parent.String.SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
wantLoad: &GrandparentOfPtrFlatten{
|
||||
Parent: ParentOfPtr{
|
||||
Child: &Child{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
String: ptrToplsString("LOADED"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "children (slice of) impl PLS",
|
||||
src: &GrandparentOfSlice{
|
||||
Parent: ParentOfSlice{
|
||||
Children: []Child{
|
||||
{
|
||||
I: 7,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD",
|
||||
},
|
||||
},
|
||||
{
|
||||
I: 9,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD2",
|
||||
},
|
||||
},
|
||||
},
|
||||
Strings: []plsString{
|
||||
"something1",
|
||||
"something2",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Parent": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"Children": {ValueType: &pb.Value_ArrayValue{
|
||||
ArrayValue: &pb.ArrayValue{Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}},
|
||||
"Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}},
|
||||
"Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
"Strings": {ValueType: &pb.Value_ArrayValue{
|
||||
ArrayValue: &pb.ArrayValue{Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
wantLoad: &GrandparentOfSlice{
|
||||
Parent: ParentOfSlice{
|
||||
Children: []Child{
|
||||
{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
},
|
||||
Strings: []plsString{
|
||||
"LOADED",
|
||||
"LOADED",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "children (slice of ptrs) impl PLS",
|
||||
src: &GrandparentOfSlicePtrs{
|
||||
Parent: ParentOfSlicePtrs{
|
||||
Children: []*Child{
|
||||
{
|
||||
I: 7,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD",
|
||||
},
|
||||
},
|
||||
{
|
||||
I: 9,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD2",
|
||||
},
|
||||
},
|
||||
},
|
||||
Strings: []*plsString{
|
||||
ptrToplsString("something1"),
|
||||
ptrToplsString("something2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Parent": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"Children": {ValueType: &pb.Value_ArrayValue{
|
||||
ArrayValue: &pb.ArrayValue{Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}},
|
||||
"Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}},
|
||||
"Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
"Strings": {ValueType: &pb.Value_ArrayValue{
|
||||
ArrayValue: &pb.ArrayValue{Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
wantLoad: &GrandparentOfSlicePtrs{
|
||||
Parent: ParentOfSlicePtrs{
|
||||
Children: []*Child{
|
||||
{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
},
|
||||
Strings: []*plsString{
|
||||
ptrToplsString("LOADED"),
|
||||
ptrToplsString("LOADED"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "parent has flatten option, children (slice of) impl PLS",
|
||||
src: &GrandparentOfSliceFlatten{
|
||||
Parent: ParentOfSlice{
|
||||
Children: []Child{
|
||||
{
|
||||
I: 7,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD",
|
||||
},
|
||||
},
|
||||
{
|
||||
I: 9,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD2",
|
||||
},
|
||||
},
|
||||
},
|
||||
Strings: []plsString{
|
||||
"something1",
|
||||
"something2",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Parent.Children.I": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{
|
||||
Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_IntegerValue{IntegerValue: 8}},
|
||||
{ValueType: &pb.Value_IntegerValue{IntegerValue: 10}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"Parent.Children.Grandchild.S": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{
|
||||
Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}},
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"Parent.Strings.SS": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{
|
||||
Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
wantLoad: &GrandparentOfSliceFlatten{
|
||||
Parent: ParentOfSlice{
|
||||
Children: []Child{
|
||||
{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
},
|
||||
Strings: []plsString{
|
||||
"LOADED",
|
||||
"LOADED",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "parent has flatten option, children (slice of ptrs) impl PLS",
|
||||
src: &GrandparentOfSlicePtrsFlatten{
|
||||
Parent: ParentOfSlicePtrs{
|
||||
Children: []*Child{
|
||||
{
|
||||
I: 7,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD",
|
||||
},
|
||||
},
|
||||
{
|
||||
I: 9,
|
||||
Grandchild: Grandchild{
|
||||
S: "BAD2",
|
||||
},
|
||||
},
|
||||
},
|
||||
Strings: []*plsString{
|
||||
ptrToplsString("something1"),
|
||||
ptrToplsString("something1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
wantSave: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Parent.Children.I": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{
|
||||
Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_IntegerValue{IntegerValue: 8}},
|
||||
{ValueType: &pb.Value_IntegerValue{IntegerValue: 10}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"Parent.Children.Grandchild.S": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{
|
||||
Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}},
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"Parent.Strings.SS": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{
|
||||
Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "SAVED"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
wantLoad: &GrandparentOfSlicePtrsFlatten{
|
||||
Parent: ParentOfSlicePtrs{
|
||||
Children: []*Child{
|
||||
{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
{
|
||||
I: 1,
|
||||
Grandchild: Grandchild{
|
||||
S: "grandchild loaded",
|
||||
},
|
||||
},
|
||||
},
|
||||
Strings: []*plsString{
|
||||
ptrToplsString("LOADED"),
|
||||
ptrToplsString("LOADED"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
e, err := saveEntity(testKey0, tc.src)
|
||||
if err != nil {
|
||||
t.Errorf("%s: save: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(e, tc.wantSave) {
|
||||
t.Errorf("%s: save: got: %#v, want: %#v", tc.desc, e, tc.wantSave)
|
||||
if tc.saveErr == "" { // Want no error.
|
||||
if err != nil {
|
||||
t.Errorf("%s: save: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(e, tc.wantSave) {
|
||||
t.Errorf("%s: save: \ngot: %+v\nwant: %+v", tc.desc, e, tc.wantSave)
|
||||
continue
|
||||
}
|
||||
} else { // Want error.
|
||||
if err == nil {
|
||||
t.Errorf("%s: save: want err", tc.desc)
|
||||
continue
|
||||
}
|
||||
if !strings.Contains(err.Error(), tc.saveErr) {
|
||||
t.Errorf("%s: save: \ngot err '%s'\nwant err '%s'", tc.desc, err.Error(), tc.saveErr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
gota := reflect.New(reflect.TypeOf(tc.wantLoad).Elem()).Interface()
|
||||
err = loadEntityProto(gota, e)
|
||||
switch tc.loadErr {
|
||||
case "":
|
||||
if tc.loadErr == "" { // Want no error.
|
||||
if err != nil {
|
||||
t.Errorf("%s: load: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
default:
|
||||
if !reflect.DeepEqual(gota, tc.wantLoad) {
|
||||
t.Errorf("%s: load: \ngot: %+v\nwant: %+v", tc.desc, gota, tc.wantLoad)
|
||||
continue
|
||||
}
|
||||
} else { // Want error.
|
||||
if err == nil {
|
||||
t.Errorf("%s: load: want err", tc.desc)
|
||||
continue
|
||||
}
|
||||
if !strings.Contains(err.Error(), tc.loadErr) {
|
||||
t.Errorf("%s: load: want err '%s', got '%s'", tc.desc, err.Error(), tc.loadErr)
|
||||
t.Errorf("%s: load: \ngot err '%s'\nwant err '%s'", tc.desc, err.Error(), tc.loadErr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tc.wantLoad, gota) {
|
||||
t.Errorf("%s: load: got: %#v, want: %#v", tc.desc, gota, tc.wantLoad)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestQueryConstruction(t *testing.T) {
|
||||
|
@ -2349,20 +2957,22 @@ func TestPutMultiTypes(t *testing.T) {
|
|||
NameKey("testKind", "second", nil),
|
||||
}
|
||||
want := []*pb.Mutation{
|
||||
{Operation: &pb.Mutation_Upsert{&pb.Entity{
|
||||
Key: keyToProto(keys[0]),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_IntegerValue{1}},
|
||||
"B": {ValueType: &pb.Value_StringValue{"one"}},
|
||||
},
|
||||
}}},
|
||||
{Operation: &pb.Mutation_Upsert{&pb.Entity{
|
||||
Key: keyToProto(keys[1]),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"B": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
},
|
||||
}}},
|
||||
{Operation: &pb.Mutation_Upsert{
|
||||
Upsert: &pb.Entity{
|
||||
Key: keyToProto(keys[0]),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
|
||||
"B": {ValueType: &pb.Value_StringValue{StringValue: "one"}},
|
||||
},
|
||||
}}},
|
||||
{Operation: &pb.Mutation_Upsert{
|
||||
Upsert: &pb.Entity{
|
||||
Key: keyToProto(keys[1]),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
"B": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
},
|
||||
}}},
|
||||
}
|
||||
|
||||
for _, tt := range testCases {
|
||||
|
@ -2423,11 +3033,11 @@ func TestNoIndexOnSliceProperties(t *testing.T) {
|
|||
}
|
||||
|
||||
want := &pb.Value{
|
||||
ValueType: &pb.Value_ArrayValue{&pb.ArrayValue{[]*pb.Value{
|
||||
{ValueType: &pb.Value_IntegerValue{123}, ExcludeFromIndexes: true},
|
||||
{ValueType: &pb.Value_BooleanValue{false}, ExcludeFromIndexes: true},
|
||||
{ValueType: &pb.Value_StringValue{"short"}, ExcludeFromIndexes: true},
|
||||
{ValueType: &pb.Value_StringValue{strings.Repeat("a", 1503)}, ExcludeFromIndexes: true},
|
||||
ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_IntegerValue{IntegerValue: 123}, ExcludeFromIndexes: true},
|
||||
{ValueType: &pb.Value_BooleanValue{BooleanValue: false}, ExcludeFromIndexes: true},
|
||||
{ValueType: &pb.Value_StringValue{StringValue: "short"}, ExcludeFromIndexes: true},
|
||||
{ValueType: &pb.Value_StringValue{StringValue: strings.Repeat("a", 1503)}, ExcludeFromIndexes: true},
|
||||
}}},
|
||||
}
|
||||
if got := entity.Properties["repeated"]; !proto.Equal(got, want) {
|
||||
|
@ -2562,15 +3172,15 @@ func TestDeferred(t *testing.T) {
|
|||
entity1 := &pb.Entity{
|
||||
Key: keyToProto(keys[0]),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_IntegerValue{1}},
|
||||
"B": {ValueType: &pb.Value_StringValue{"one"}},
|
||||
"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
|
||||
"B": {ValueType: &pb.Value_StringValue{StringValue: "one"}},
|
||||
},
|
||||
}
|
||||
entity2 := &pb.Entity{
|
||||
Key: keyToProto(keys[1]),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"B": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
"B": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2630,19 +3240,94 @@ func TestDeferred(t *testing.T) {
|
|||
for _, e := range dst {
|
||||
if e.A == 1 {
|
||||
if e.B != "one" {
|
||||
t.Fatalf("unexpected entity %#v", e)
|
||||
t.Fatalf("unexpected entity %+v", e)
|
||||
}
|
||||
} else if e.A == 2 {
|
||||
if e.B != "two" {
|
||||
t.Fatalf("unexpected entity %#v", e)
|
||||
t.Fatalf("unexpected entity %+v", e)
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("unexpected entity %#v", e)
|
||||
t.Fatalf("unexpected entity %+v", e)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type KeyLoaderEnt struct {
|
||||
A int
|
||||
K *Key
|
||||
}
|
||||
|
||||
func (e *KeyLoaderEnt) Load(p []Property) error {
|
||||
e.A = 2
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *KeyLoaderEnt) LoadKey(k *Key) error {
|
||||
e.K = k
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *KeyLoaderEnt) Save() ([]Property, error) {
|
||||
return []Property{{Name: "A", Value: int64(3)}}, nil
|
||||
}
|
||||
|
||||
func TestKeyLoaderEndToEnd(t *testing.T) {
|
||||
keys := []*Key{
|
||||
NameKey("testKind", "first", nil),
|
||||
NameKey("testKind", "second", nil),
|
||||
}
|
||||
|
||||
entity1 := &pb.Entity{
|
||||
Key: keyToProto(keys[0]),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
|
||||
"B": {ValueType: &pb.Value_StringValue{StringValue: "one"}},
|
||||
},
|
||||
}
|
||||
entity2 := &pb.Entity{
|
||||
Key: keyToProto(keys[1]),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
"B": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
},
|
||||
}
|
||||
|
||||
fakeClient := &fakeDatastoreClient{
|
||||
lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) {
|
||||
return &pb.LookupResponse{
|
||||
Found: []*pb.EntityResult{
|
||||
{
|
||||
Entity: entity1,
|
||||
Version: 1,
|
||||
},
|
||||
{
|
||||
Entity: entity2,
|
||||
Version: 1,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
client := &Client{
|
||||
client: fakeClient,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
dst := make([]*KeyLoaderEnt, len(keys))
|
||||
err := client.GetMulti(ctx, keys, dst)
|
||||
if err != nil {
|
||||
t.Fatalf("client.Get: %v", err)
|
||||
}
|
||||
|
||||
for i := range dst {
|
||||
if !reflect.DeepEqual(dst[i].K, keys[i]) {
|
||||
t.Fatalf("unexpected entity %d to have key %+v, got %+v", i, keys[i], dst[i].K)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeferredMissing(t *testing.T) {
|
||||
type Ent struct {
|
||||
A int
|
||||
|
@ -2722,12 +3407,14 @@ func TestDeferredMissing(t *testing.T) {
|
|||
|
||||
for _, e := range dst {
|
||||
if e.A != 0 || e.B != "" {
|
||||
t.Fatalf("unexpected entity %#v", e)
|
||||
t.Fatalf("unexpected entity %+v", e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fakeDatastoreClient struct {
|
||||
pb.DatastoreClient
|
||||
|
||||
// Optional handlers for the datastore methods.
|
||||
// Any handlers left undefined will return an error.
|
||||
lookup func(*pb.LookupRequest) (*pb.LookupResponse, error)
|
||||
|
|
34
vendor/cloud.google.com/go/datastore/doc.go
generated
vendored
|
@ -319,6 +319,40 @@ Example code:
|
|||
The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
|
||||
arbitrary entity's contents.
|
||||
|
||||
The KeyLoader Interface
|
||||
|
||||
If a type implements the PropertyLoadSaver interface, it may
|
||||
also want to implement the KeyLoader interface.
|
||||
The KeyLoader interface exists to allow implementations of PropertyLoadSaver
|
||||
to also load an Entity's Key into the Go type. This type may be a struct
|
||||
pointer, but it does not have to be. The datastore package will call LoadKey
|
||||
when getting the entity's contents, after calling Load.
|
||||
|
||||
Example code:
|
||||
|
||||
type WithKeyExample struct {
|
||||
I int
|
||||
Key *datastore.Key
|
||||
}
|
||||
|
||||
func (x *WithKeyExample) LoadKey(k *datastore.Key) error {
|
||||
x.Key = k
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *WithKeyExample) Load(ps []datastore.Property) error {
|
||||
// Load I as usual.
|
||||
return datastore.LoadStruct(x, ps)
|
||||
}
|
||||
|
||||
func (x *WithKeyExample) Save() ([]datastore.Property, error) {
|
||||
// Save I as usual.
|
||||
return datastore.SaveStruct(x)
|
||||
}
|
||||
|
||||
To load a Key into a struct which does not implement the PropertyLoadSaver
|
||||
interface, see the "Key Field" section above.
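Editor's note (not part of the vendored diff): a minimal caller-side sketch of the behaviour described above. It assumes the usual imports (context, cloud.google.com/go/datastore), an initialized *datastore.Client, and the hypothetical kind name "WithKeyExample"; the WithKeyExample type and its Load/Save/LoadKey methods are the ones from the example code above.

func fetchWithKey(ctx context.Context, client *datastore.Client, name string) (*WithKeyExample, error) {
	k := datastore.NameKey("WithKeyExample", name, nil)
	var x WithKeyExample
	if err := client.Get(ctx, k, &x); err != nil {
		return nil, err
	}
	// Load populated x.I; LoadKey was then called with the entity's key,
	// so x.Key now equals k.
	return &x, nil
}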
|
||||
|
||||
|
||||
Queries
|
||||
|
||||
|
|
79
vendor/cloud.google.com/go/datastore/load.go
generated
vendored
|
@ -123,7 +123,16 @@ func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.V
|
|||
return "cannot set struct field"
|
||||
}
|
||||
|
||||
var err error
|
||||
// If field implements PLS, we delegate loading to the PLS's Load early,
|
||||
// and stop iterating through fields.
|
||||
ok, err := plsFieldLoad(v, p, fieldNames)
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
if ok {
|
||||
return ""
|
||||
}
|
||||
|
||||
if field.Type.Kind() == reflect.Struct {
|
||||
codec, err = structCache.Fields(field.Type)
|
||||
if err != nil {
|
||||
|
@ -143,6 +152,17 @@ func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.V
|
|||
v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
|
||||
}
|
||||
structValue = v.Index(sliceIndex)
|
||||
|
||||
// If structValue implements PLS, we delegate loading to the PLS's
|
||||
// Load early, and stop iterating through fields.
|
||||
ok, err := plsFieldLoad(structValue, p, fieldNames)
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
if ok {
|
||||
return ""
|
||||
}
|
||||
|
||||
if structValue.Type().Kind() == reflect.Struct {
|
||||
codec, err = structCache.Fields(structValue.Type())
|
||||
if err != nil {
|
||||
|
@ -181,10 +201,50 @@ func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.V
|
|||
return ""
|
||||
}
|
||||
|
||||
// plsFieldLoad first tries to convert v's value to a PLS, then v's addressed
|
||||
// value to a PLS. If neither succeeds, plsFieldLoad returns false for the first return
|
||||
// value. Otherwise, the first return value will be true.
|
||||
// If v is successfully converted to a PLS, plsFieldLoad will then try to Load
|
||||
// the property p into v (by way of the PLS's Load method).
|
||||
//
|
||||
// If the field v has been flattened, the Property's name must be altered
|
||||
// before calling Load to reflect the field v.
|
||||
// For example, if our original field name was "A.B.C.D",
|
||||
// and at this point in iteration we had initialized the field
|
||||
// corresponding to "A" and have moved into the struct, so that now
|
||||
// v corresponds to the field named "B", then we want to let the
|
||||
// PLS handle this field (B)'s subfields ("C", "D"),
|
||||
// so we send the property to the PLS's Load, renamed to "C.D".
|
||||
//
|
||||
// If subfields are present, the field v has been flattened.
|
||||
func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err error) {
|
||||
vpls, err := plsForLoad(v)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if vpls == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If Entity, load properties as well as key.
|
||||
if e, ok := p.Value.(*Entity); ok {
|
||||
err = loadEntity(vpls, e)
|
||||
return true, err
|
||||
}
|
||||
|
||||
// If flattened, we must alter the property's name to reflect
|
||||
// the field v.
|
||||
if len(subfields) > 0 {
|
||||
p.Name = strings.Join(subfields, ".")
|
||||
}
|
||||
|
||||
return true, vpls.Load([]Property{p})
|
||||
}
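Editor's note (not part of the vendored diff): the renaming rule described in the comment above reduces to joining the remaining subfield names with dots before handing the property to the nested PLS. A standalone sketch with hypothetical names:

package main

import (
	"fmt"
	"strings"
)

// renameForNestedPLS mirrors the renaming step in plsFieldLoad: once loading has
// descended into the field that owns the PLS, only the remaining path segments are
// passed on, so a property originally named "A.B.C.D" with subfields ["C", "D"]
// reaches the nested PLS as "C.D".
func renameForNestedPLS(subfields []string, originalName string) string {
	if len(subfields) > 0 {
		return strings.Join(subfields, ".")
	}
	return originalName
}

func main() {
	fmt.Println(renameForNestedPLS([]string{"C", "D"}, "A.B.C.D")) // prints "C.D"
}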
|
||||
|
||||
// setVal sets 'v' to the value of the Property 'p'.
|
||||
func setVal(v reflect.Value, p Property) string {
|
||||
pValue := p.Value
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
x, ok := pValue.(int64)
|
||||
|
@ -267,12 +327,6 @@ func setVal(v reflect.Value, p Property) string {
|
|||
if !ok {
|
||||
return typeMismatchReason(p, v)
|
||||
}
|
||||
|
||||
// Check if v implements PropertyLoadSaver.
|
||||
if _, ok := v.Interface().(PropertyLoadSaver); ok {
|
||||
return fmt.Sprintf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface())
|
||||
}
|
||||
|
||||
err := loadEntity(v.Addr().Interface(), ent)
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
|
@ -320,7 +374,14 @@ func loadEntityProto(dst interface{}, src *pb.Entity) error {
|
|||
|
||||
func loadEntity(dst interface{}, ent *Entity) error {
|
||||
if pls, ok := dst.(PropertyLoadSaver); ok {
|
||||
return pls.Load(ent.Properties)
|
||||
err := pls.Load(ent.Properties)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e, ok := dst.(KeyLoader); ok {
|
||||
err = e.LoadKey(ent.Key)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return loadEntityToStruct(dst, ent)
|
||||
}
|
||||
|
|
453
vendor/cloud.google.com/go/datastore/load_test.go
generated
vendored
|
@ -84,71 +84,71 @@ func TestLoadEntityNestedLegacy(t *testing.T) {
|
|||
want interface{}
|
||||
}{
|
||||
{
|
||||
"nested",
|
||||
&pb.Entity{
|
||||
desc: "nested",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"X": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
"A.I": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
"A.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
&NestedSimple1{
|
||||
want: &NestedSimple1{
|
||||
A: Simple{I: 2},
|
||||
X: "two",
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested with tag",
|
||||
&pb.Entity{
|
||||
desc: "nested with tag",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"AA.II": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"AA.II": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
&NestedSimpleWithTag{
|
||||
want: &NestedSimpleWithTag{
|
||||
A: SimpleWithTag{I: 2},
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested with anonymous struct field",
|
||||
&pb.Entity{
|
||||
desc: "nested with anonymous struct field",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"X": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
&NestedSimpleAnonymous{
|
||||
want: &NestedSimpleAnonymous{
|
||||
Simple: Simple{I: 2},
|
||||
X: "two",
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested with dotted field tag",
|
||||
&pb.Entity{
|
||||
desc: "nested with dotted field tag",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A.B.B": {ValueType: &pb.Value_StringValue{"bb"}},
|
||||
"A.B.B": {ValueType: &pb.Value_StringValue{StringValue: "bb"}},
|
||||
},
|
||||
},
|
||||
&ABDotB{
|
||||
want: &ABDotB{
|
||||
A: BDotB{
|
||||
B: "bb",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested with multiple anonymous fields",
|
||||
&pb.Entity{
|
||||
desc: "nested with multiple anonymous fields",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{3}},
|
||||
"S": {ValueType: &pb.Value_StringValue{"S"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{"s"}},
|
||||
"X": {ValueType: &pb.Value_StringValue{"s"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}},
|
||||
"S": {ValueType: &pb.Value_StringValue{StringValue: "S"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "s"}},
|
||||
},
|
||||
},
|
||||
&MultiAnonymous{
|
||||
want: &MultiAnonymous{
|
||||
Simple: Simple{I: 3},
|
||||
SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"},
|
||||
X: "s",
|
||||
|
@ -193,71 +193,71 @@ func TestLoadEntityNested(t *testing.T) {
|
|||
want interface{}
|
||||
}{
|
||||
{
|
||||
"nested basic",
|
||||
&pb.Entity{
|
||||
desc: "nested basic",
|
||||
src: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{3}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{10}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}},
|
||||
},
|
||||
},
|
||||
&NestedSimple{
|
||||
want: &NestedSimple{
|
||||
A: Simple{I: 3},
|
||||
I: 10,
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested with struct tags",
|
||||
&pb.Entity{
|
||||
desc: "nested with struct tags",
|
||||
src: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"AA": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"II": {ValueType: &pb.Value_IntegerValue{1}},
|
||||
"II": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
&NestedSimpleWithTag{
|
||||
want: &NestedSimpleWithTag{
|
||||
A: SimpleWithTag{I: 1},
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested 2x",
|
||||
&pb.Entity{
|
||||
desc: "nested 2x",
|
||||
src: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"AA": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{3}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{1}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"A": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"S": {ValueType: &pb.Value_StringValue{"S"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{"s"}},
|
||||
"S": {ValueType: &pb.Value_StringValue{StringValue: "S"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"S": {ValueType: &pb.Value_StringValue{"SS"}},
|
||||
"S": {ValueType: &pb.Value_StringValue{StringValue: "SS"}},
|
||||
},
|
||||
},
|
||||
&NestedSimple2X{
|
||||
want: &NestedSimple2X{
|
||||
AA: NestedSimple{
|
||||
A: Simple{I: 3},
|
||||
I: 1,
|
||||
|
@ -267,36 +267,36 @@ func TestLoadEntityNested(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"nested anonymous",
|
||||
&pb.Entity{
|
||||
desc: "nested anonymous",
|
||||
src: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{3}},
|
||||
"X": {ValueType: &pb.Value_StringValue{"SomeX"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "SomeX"}},
|
||||
},
|
||||
},
|
||||
&NestedSimpleAnonymous{
|
||||
want: &NestedSimpleAnonymous{
|
||||
Simple: Simple{I: 3},
|
||||
X: "SomeX",
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested simple with slice",
|
||||
&pb.Entity{
|
||||
desc: "nested simple with slice",
|
||||
src: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_ArrayValue{
|
||||
&pb.ArrayValue{
|
||||
[]*pb.Value{
|
||||
ArrayValue: &pb.ArrayValue{
|
||||
Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{3}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{4}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
|
@ -306,63 +306,63 @@ func TestLoadEntityNested(t *testing.T) {
|
|||
},
|
||||
},
|
||||
|
||||
&NestedSliceOfSimple{
|
||||
want: &NestedSliceOfSimple{
|
||||
A: []Simple{Simple{I: 3}, Simple{I: 4}},
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested with multiple anonymous fields",
|
||||
&pb.Entity{
|
||||
desc: "nested with multiple anonymous fields",
|
||||
src: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_IntegerValue{3}},
|
||||
"S": {ValueType: &pb.Value_StringValue{"S"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{"s"}},
|
||||
"X": {ValueType: &pb.Value_StringValue{"ss"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}},
|
||||
"S": {ValueType: &pb.Value_StringValue{StringValue: "S"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "ss"}},
|
||||
},
|
||||
},
|
||||
&MultiAnonymous{
|
||||
want: &MultiAnonymous{
|
||||
Simple: Simple{I: 3},
|
||||
SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"},
|
||||
X: "ss",
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested with dotted field tag",
|
||||
&pb.Entity{
|
||||
desc: "nested with dotted field tag",
|
||||
src: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"B.B": {ValueType: &pb.Value_StringValue{"bb"}},
|
||||
"B.B": {ValueType: &pb.Value_StringValue{StringValue: "bb"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
&ABDotB{
|
||||
want: &ABDotB{
|
||||
A: BDotB{
|
||||
B: "bb",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested entity with key",
|
||||
&pb.Entity{
|
||||
desc: "nested entity with key",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
|
||||
"Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}},
|
||||
"N": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Key: keyToProto(testKey1a),
|
||||
Properties: map[string]*pb.Value{
|
||||
"X": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
&NestedWithKey{
|
||||
want: &NestedWithKey{
|
||||
Y: "yyy",
|
||||
N: WithKey{
|
||||
X: "two",
|
||||
|
@ -372,23 +372,23 @@ func TestLoadEntityNested(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"nested entity with invalid key",
|
||||
&pb.Entity{
|
||||
desc: "nested entity with invalid key",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
|
||||
"Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}},
|
||||
"N": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Key: keyToProto(invalidKey),
|
||||
Properties: map[string]*pb.Value{
|
||||
"X": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
&NestedWithKey{
|
||||
want: &NestedWithKey{
|
||||
Y: "yyy",
|
||||
N: WithKey{
|
||||
X: "two",
|
||||
|
@ -433,49 +433,49 @@ func TestAlreadyPopulatedDst(t *testing.T) {
|
|||
want interface{}
|
||||
}{
|
||||
{
|
||||
"simple already populated, nil properties",
|
||||
&pb.Entity{
|
||||
desc: "simple already populated, nil properties",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"I": {ValueType: &pb.Value_NullValue{}},
|
||||
},
|
||||
},
|
||||
&Simple{
|
||||
dst: &Simple{
|
||||
I: 12,
|
||||
},
|
||||
&Simple{},
|
||||
want: &Simple{},
|
||||
},
|
||||
{
|
||||
"nested structs already populated",
|
||||
&pb.Entity{
|
||||
desc: "nested structs already populated",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"SS": {ValueType: &pb.Value_StringValue{"world"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "world"}},
|
||||
},
|
||||
},
|
||||
&SimpleTwoFields{S: "hello" /* SS: "" */},
|
||||
&SimpleTwoFields{S: "hello", SS: "world"},
|
||||
dst: &SimpleTwoFields{S: "hello" /* SS: "" */},
|
||||
want: &SimpleTwoFields{S: "hello", SS: "world"},
|
||||
},
|
||||
{
|
||||
"nested structs already populated, pValues nil",
|
||||
&pb.Entity{
|
||||
desc: "nested structs already populated, pValues nil",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"S": {ValueType: &pb.Value_NullValue{}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{"ss hello"}},
|
||||
"SS": {ValueType: &pb.Value_StringValue{StringValue: "ss hello"}},
|
||||
"Nest": {ValueType: &pb.Value_NullValue{}},
|
||||
"TwiceNest": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_NullValue{}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{5}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 5}},
|
||||
},
|
||||
},
|
||||
&NestedStructPtrs{
|
||||
dst: &NestedStructPtrs{
|
||||
&SimpleTwoFields{S: "hello" /* SS: "" */},
|
||||
&SimpleTwoFields{ /* S: "" */ SS: "twice hello"},
|
||||
&NestedSimple2{
|
||||
|
@ -484,7 +484,7 @@ func TestAlreadyPopulatedDst(t *testing.T) {
|
|||
},
|
||||
0,
|
||||
},
|
||||
&NestedStructPtrs{
|
||||
want: &NestedStructPtrs{
|
||||
&SimpleTwoFields{ /* S: "" */ SS: "ss hello"},
|
||||
nil,
|
||||
&NestedSimple2{
|
||||
|
@ -508,3 +508,248 @@ func TestAlreadyPopulatedDst(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
type PLS0 struct {
|
||||
A string
|
||||
}
|
||||
|
||||
func (p *PLS0) Load(props []Property) error {
|
||||
for _, pp := range props {
|
||||
if pp.Name == "A" {
|
||||
p.A = pp.Value.(string)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PLS0) Save() (props []Property, err error) {
|
||||
return []Property{{Name: "A", Value: p.A}}, nil
|
||||
}
|
||||
|
||||
type KeyLoader1 struct {
|
||||
A string
|
||||
K *Key
|
||||
}
|
||||
|
||||
func (kl *KeyLoader1) Load(props []Property) error {
|
||||
for _, pp := range props {
|
||||
if pp.Name == "A" {
|
||||
kl.A = pp.Value.(string)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (kl *KeyLoader1) Save() (props []Property, err error) {
|
||||
return []Property{{Name: "A", Value: kl.A}}, nil
|
||||
}
|
||||
|
||||
func (kl *KeyLoader1) LoadKey(k *Key) error {
|
||||
kl.K = k
|
||||
return nil
|
||||
}
|
||||
|
||||
type KeyLoader2 struct {
|
||||
B int
|
||||
Key *Key
|
||||
}
|
||||
|
||||
func (kl *KeyLoader2) Load(props []Property) error {
|
||||
for _, pp := range props {
|
||||
if pp.Name == "B" {
|
||||
kl.B = int(pp.Value.(int64))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (kl *KeyLoader2) Save() (props []Property, err error) {
|
||||
return []Property{{Name: "B", Value: int64(kl.B)}}, nil
|
||||
}
|
||||
|
||||
func (kl *KeyLoader2) LoadKey(k *Key) error {
|
||||
kl.Key = k
|
||||
return nil
|
||||
}
|
||||
|
||||
type KeyLoader3 struct {
|
||||
C bool
|
||||
K *Key
|
||||
}
|
||||
|
||||
func (kl *KeyLoader3) Load(props []Property) error {
|
||||
for _, pp := range props {
|
||||
if pp.Name == "C" {
|
||||
kl.C = pp.Value.(bool)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (kl *KeyLoader3) Save() (props []Property, err error) {
|
||||
return []Property{{Name: "C", Value: kl.C}}, nil
|
||||
}
|
||||
|
||||
func (kl *KeyLoader3) LoadKey(k *Key) error {
|
||||
kl.K = k
|
||||
return nil
|
||||
}
|
||||
|
||||
type KeyLoader4 struct {
|
||||
PLS0
|
||||
K *Key
|
||||
}
|
||||
|
||||
func (kl *KeyLoader4) LoadKey(k *Key) error {
|
||||
kl.K = k
|
||||
return nil
|
||||
}
|
||||
|
||||
type NotKeyLoader struct {
|
||||
A string
|
||||
K *Key
|
||||
}
|
||||
|
||||
func (p *NotKeyLoader) Load(props []Property) error {
|
||||
for _, pp := range props {
|
||||
if pp.Name == "A" {
|
||||
p.A = pp.Value.(string)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *NotKeyLoader) Save() (props []Property, err error) {
|
||||
return []Property{{Name: "A", Value: p.A}}, nil
|
||||
}
|
||||
|
||||
type NestedKeyLoaders struct {
|
||||
Two *KeyLoader2
|
||||
Three []*KeyLoader3
|
||||
Four *KeyLoader4
|
||||
PLS *NotKeyLoader
|
||||
}
|
||||
|
||||
func TestKeyLoader(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
src *pb.Entity
|
||||
dst interface{}
|
||||
want interface{}
|
||||
}{
|
||||
{
|
||||
desc: "simple key loader",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_StringValue{StringValue: "hello"}},
|
||||
},
|
||||
},
|
||||
dst: &KeyLoader1{},
|
||||
want: &KeyLoader1{
|
||||
A: "hello",
|
||||
K: testKey0,
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "embedded PLS key loader",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_StringValue{StringValue: "hello"}},
|
||||
},
|
||||
},
|
||||
dst: &KeyLoader4{},
|
||||
want: &KeyLoader4{
|
||||
PLS0: PLS0{A: "hello"},
|
||||
K: testKey0,
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "nested key loaders",
|
||||
src: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Two": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"B": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}},
|
||||
},
|
||||
Key: keyToProto(testKey1a),
|
||||
},
|
||||
}},
|
||||
"Three": {ValueType: &pb.Value_ArrayValue{
|
||||
ArrayValue: &pb.ArrayValue{
|
||||
Values: []*pb.Value{
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"C": {ValueType: &pb.Value_BooleanValue{BooleanValue: true}},
|
||||
},
|
||||
Key: keyToProto(testKey1b),
|
||||
},
|
||||
}},
|
||||
{ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"C": {ValueType: &pb.Value_BooleanValue{BooleanValue: false}},
|
||||
},
|
||||
Key: keyToProto(testKey0),
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
"Four": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_StringValue{StringValue: "testing"}},
|
||||
},
|
||||
Key: keyToProto(testKey2a),
|
||||
},
|
||||
}},
|
||||
"PLS": {ValueType: &pb.Value_EntityValue{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"A": {ValueType: &pb.Value_StringValue{StringValue: "something"}},
|
||||
},
|
||||
|
||||
Key: keyToProto(testKey1a),
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
dst: &NestedKeyLoaders{},
|
||||
want: &NestedKeyLoaders{
|
||||
Two: &KeyLoader2{B: 12, Key: testKey1a},
|
||||
Three: []*KeyLoader3{
|
||||
{
|
||||
C: true,
|
||||
K: testKey1b,
|
||||
},
|
||||
{
|
||||
C: false,
|
||||
K: testKey0,
|
||||
},
|
||||
},
|
||||
Four: &KeyLoader4{
|
||||
PLS0: PLS0{A: "testing"},
|
||||
K: testKey2a,
|
||||
},
|
||||
PLS: &NotKeyLoader{A: "something"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
err := loadEntityProto(tc.dst, tc.src)
|
||||
if err != nil {
|
||||
t.Errorf("loadEntityProto: %s: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tc.want, tc.dst) {
|
||||
t.Errorf("%s: compare:\ngot: %+v\nwant: %+v", tc.desc, tc.dst, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
63
vendor/cloud.google.com/go/datastore/prop.go
generated
vendored
|
@ -82,6 +82,14 @@ type PropertyLoadSaver interface {
|
|||
Save() ([]Property, error)
|
||||
}
|
||||
|
||||
// KeyLoader can store a Key.
|
||||
type KeyLoader interface {
|
||||
// PropertyLoadSaver is embedded because a KeyLoader
|
||||
// must also always implement PropertyLoadSaver.
|
||||
PropertyLoadSaver
|
||||
LoadKey(k *Key) error
|
||||
}
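Editor's note (not part of the vendored diff): a minimal sketch of a type satisfying KeyLoader, using a hypothetical Task type and assuming the exported datastore.Property and datastore.Key types. Load and Save satisfy the embedded PropertyLoadSaver, and LoadKey receives the entity's key after Load has run.

// Task is a hypothetical example type, not part of the vendored package.
type Task struct {
	Done bool
	Key  *datastore.Key // filled in by LoadKey, never stored as a property
}

func (t *Task) Load(ps []datastore.Property) error {
	for _, p := range ps {
		if p.Name == "Done" {
			if b, ok := p.Value.(bool); ok {
				t.Done = b
			}
		}
	}
	return nil
}

func (t *Task) Save() ([]datastore.Property, error) {
	return []datastore.Property{{Name: "Done", Value: t.Done}}, nil
}

func (t *Task) LoadKey(k *datastore.Key) error {
	t.Key = k
	return nil
}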
|
||||
|
||||
// PropertyList converts a []Property to implement PropertyLoadSaver.
|
||||
type PropertyList []Property
|
||||
|
||||
|
@ -277,3 +285,58 @@ func SaveStruct(src interface{}) ([]Property, error) {
|
|||
}
|
||||
return x.Save()
|
||||
}
|
||||
|
||||
// plsForLoad tries to convert v to a PropertyLoadSaver.
|
||||
// If successful, plsForLoad returns a settable v as a PropertyLoadSaver.
|
||||
//
|
||||
// plsForLoad is intended to be used with nested struct fields which
|
||||
// may implement PropertyLoadSaver.
|
||||
//
|
||||
// v must be settable.
|
||||
func plsForLoad(v reflect.Value) (PropertyLoadSaver, error) {
|
||||
var nilPtr bool
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
nilPtr = true
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
|
||||
vpls, err := pls(v)
|
||||
if nilPtr && (vpls == nil || err != nil) {
|
||||
// unset v
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
}
|
||||
|
||||
return vpls, err
|
||||
}
|
||||
|
||||
// plsForSave tries to convert v to a PropertyLoadSaver.
|
||||
// If successful, plsForSave returns v as a PropertyLoadSaver.
|
||||
//
|
||||
// plsForSave is intended to be used with nested struct fields which
|
||||
// may implement PropertyLoadSaver.
|
||||
//
|
||||
// v must be settable.
|
||||
func plsForSave(v reflect.Value) (PropertyLoadSaver, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Chan, reflect.Func:
|
||||
// If v is nil, return early. v contains no data to save.
|
||||
if v.IsNil() {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return pls(v)
|
||||
}
|
||||
|
||||
func pls(v reflect.Value) (PropertyLoadSaver, error) {
|
||||
if v.Kind() != reflect.Ptr {
|
||||
if _, ok := v.Interface().(PropertyLoadSaver); ok {
|
||||
return nil, fmt.Errorf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface())
|
||||
}
|
||||
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
vpls, _ := v.Interface().(PropertyLoadSaver)
|
||||
return vpls, nil
|
||||
}
|
||||
|
|
30
vendor/cloud.google.com/go/datastore/query_test.go
generated
vendored
|
@ -32,7 +32,7 @@ var (
|
|||
Path: []*pb.Key_PathElement{
|
||||
{
|
||||
Kind: "Gopher",
|
||||
IdType: &pb.Key_PathElement_Id{6},
|
||||
IdType: &pb.Key_PathElement_Id{Id: 6},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -40,11 +40,11 @@ var (
|
|||
Path: []*pb.Key_PathElement{
|
||||
{
|
||||
Kind: "Gopher",
|
||||
IdType: &pb.Key_PathElement_Id{6},
|
||||
IdType: &pb.Key_PathElement_Id{Id: 6},
|
||||
},
|
||||
{
|
||||
Kind: "Gopher",
|
||||
IdType: &pb.Key_PathElement_Id{8},
|
||||
IdType: &pb.Key_PathElement_Id{Id: 8},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -66,7 +66,7 @@ func (c *fakeClient) Commit(_ context.Context, req *pb.CommitRequest, _ ...grpc.
|
|||
|
||||
func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) {
|
||||
expectedIn := &pb.RunQueryRequest{
|
||||
QueryType: &pb.RunQueryRequest_Query{&pb.Query{
|
||||
QueryType: &pb.RunQueryRequest_Query{Query: &pb.Query{
|
||||
Kind: []*pb.KindExpression{{Name: "Gopher"}},
|
||||
}},
|
||||
}
|
||||
|
@ -82,8 +82,8 @@ func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) {
|
|||
Entity: &pb.Entity{
|
||||
Key: key1,
|
||||
Properties: map[string]*pb.Value{
|
||||
"Name": {ValueType: &pb.Value_StringValue{"George"}},
|
||||
"Height": {ValueType: &pb.Value_IntegerValue{32}},
|
||||
"Name": {ValueType: &pb.Value_StringValue{StringValue: "George"}},
|
||||
"Height": {ValueType: &pb.Value_IntegerValue{IntegerValue: 32}},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -91,7 +91,7 @@ func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) {
|
|||
Entity: &pb.Entity{
|
||||
Key: key2,
|
||||
Properties: map[string]*pb.Value{
|
||||
"Name": {ValueType: &pb.Value_StringValue{"Rufus"}},
|
||||
"Name": {ValueType: &pb.Value_StringValue{StringValue: "Rufus"}},
|
||||
// No height for Rufus.
|
||||
},
|
||||
},
|
||||
|
@ -507,12 +507,20 @@ func TestReadOptions(t *testing.T) {
|
|||
want: nil,
|
||||
},
|
||||
{
|
||||
q: NewQuery("").Transaction(&Transaction{id: tid}),
|
||||
want: &pb.ReadOptions{&pb.ReadOptions_Transaction{tid}},
|
||||
q: NewQuery("").Transaction(&Transaction{id: tid}),
|
||||
want: &pb.ReadOptions{
|
||||
ConsistencyType: &pb.ReadOptions_Transaction{
|
||||
Transaction: tid,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
q: NewQuery("").EventualConsistency(),
|
||||
want: &pb.ReadOptions{&pb.ReadOptions_ReadConsistency_{pb.ReadOptions_EVENTUAL}},
|
||||
q: NewQuery("").EventualConsistency(),
|
||||
want: &pb.ReadOptions{
|
||||
ConsistencyType: &pb.ReadOptions_ReadConsistency_{
|
||||
ReadConsistency: pb.ReadOptions_EVENTUAL,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
req := &pb.RunQueryRequest{}
|
||||
|
|
92
vendor/cloud.google.com/go/datastore/save.go
generated
vendored
|
@ -19,6 +19,7 @@ import (
|
|||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
timepb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
pb "google.golang.org/genproto/googleapis/datastore/v1"
|
||||
|
@ -57,8 +58,15 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect
|
|||
return nil
|
||||
}
|
||||
|
||||
// Check if v implements PropertyLoadSaver.
|
||||
pls, isPLS := v.Interface().(PropertyLoadSaver)
|
||||
// First check if field type implements PLS. If so, use PLS to
|
||||
// save.
|
||||
ok, err := plsFieldSave(props, p, name, opts, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch x := v.Interface().(type) {
|
||||
case *Key, time.Time, GeoPoint:
|
||||
|
@ -89,19 +97,12 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect
|
|||
v = v.Elem()
|
||||
fallthrough
|
||||
case reflect.Struct:
|
||||
if isPLS {
|
||||
subProps, err := pls.Save()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Value = &Entity{Properties: subProps}
|
||||
break
|
||||
}
|
||||
|
||||
if !v.CanAddr() {
|
||||
return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
|
||||
}
|
||||
sub, err := newStructPLS(v.Addr().Interface())
|
||||
vi := v.Addr().Interface()
|
||||
|
||||
sub, err := newStructPLS(vi)
|
||||
if err != nil {
|
||||
return fmt.Errorf("datastore: unsupported struct field: %v", err)
|
||||
}
|
||||
|
@ -133,6 +134,44 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect
|
|||
return nil
|
||||
}
|
||||
|
||||
// plsFieldSave first tries to convert v's value to a PLS, then v's addressed
|
||||
// value to a PLS. If neither succeeds, plsFieldSave returns false for the first return
|
||||
// value.
|
||||
// If v is successfully converted to a PLS, plsFieldSave will then add the
|
||||
// Value to property p by way of the PLS's Save method, and append it to props.
|
||||
//
|
||||
// If the flatten option is present in opts, name must be prepended to each property's
|
||||
// name before it is appended to props. E.g., if name were "A" and a subproperty's name
|
||||
// were "B", the resultant name of the property to be appended to props would be "A.B".
|
||||
func plsFieldSave(props *[]Property, p Property, name string, opts saveOpts, v reflect.Value) (ok bool, err error) {
|
||||
vpls, err := plsForSave(v)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if vpls == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
subProps, err := vpls.Save()
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
if opts.flatten {
|
||||
for _, subp := range subProps {
|
||||
subp.Name = name + "." + subp.Name
|
||||
*props = append(*props, subp)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
p.Value = &Entity{Properties: subProps}
|
||||
*props = append(*props, p)
|
||||
|
||||
return true, nil
|
||||
}
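Editor's note (not part of the vendored diff): the flatten branch above prefixes each sub-property's name with the field name plus a dot before appending it to the output slice. A standalone sketch with simplified stand-in types (hypothetical, not the vendored ones):

package main

import "fmt"

// Property is a simplified stand-in for the package's Property type, kept only
// so the naming rule can be shown in isolation.
type Property struct {
	Name  string
	Value interface{}
}

// flattenPrefix applies the same rule as the flatten branch of plsFieldSave:
// a sub-property "B" of a field named "A" is emitted as "A.B".
func flattenPrefix(fieldName string, subProps []Property) []Property {
	out := make([]Property, 0, len(subProps))
	for _, sp := range subProps {
		sp.Name = fieldName + "." + sp.Name
		out = append(out, sp)
	}
	return out
}

func main() {
	sub := []Property{{Name: "B", Value: int64(12)}}
	fmt.Printf("%+v\n", flattenPrefix("A", sub)) // [{Name:A.B Value:12}]
}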
|
||||
|
||||
// key extracts the *Key struct field from struct v based on the structCodec of s.
|
||||
func (s structPLS) key(v reflect.Value) (*Key, error) {
|
||||
if v.Kind() != reflect.Struct {
|
||||
|
@ -290,33 +329,36 @@ func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
|
|||
val := &pb.Value{ExcludeFromIndexes: noIndex}
|
||||
switch v := iv.(type) {
|
||||
case int:
|
||||
val.ValueType = &pb.Value_IntegerValue{int64(v)}
|
||||
val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)}
|
||||
case int32:
|
||||
val.ValueType = &pb.Value_IntegerValue{int64(v)}
|
||||
val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)}
|
||||
case int64:
|
||||
val.ValueType = &pb.Value_IntegerValue{v}
|
||||
val.ValueType = &pb.Value_IntegerValue{IntegerValue: v}
|
||||
case bool:
|
||||
val.ValueType = &pb.Value_BooleanValue{v}
|
||||
val.ValueType = &pb.Value_BooleanValue{BooleanValue: v}
|
||||
case string:
|
||||
if len(v) > 1500 && !noIndex {
|
||||
return nil, errors.New("string property too long to index")
|
||||
}
|
||||
val.ValueType = &pb.Value_StringValue{v}
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, fmt.Errorf("string is not valid utf8: %q", v)
|
||||
}
|
||||
val.ValueType = &pb.Value_StringValue{StringValue: v}
|
||||
case float32:
|
||||
val.ValueType = &pb.Value_DoubleValue{float64(v)}
|
||||
val.ValueType = &pb.Value_DoubleValue{DoubleValue: float64(v)}
|
||||
case float64:
|
||||
val.ValueType = &pb.Value_DoubleValue{v}
|
||||
val.ValueType = &pb.Value_DoubleValue{DoubleValue: v}
|
||||
case *Key:
|
||||
if v == nil {
|
||||
val.ValueType = &pb.Value_NullValue{}
|
||||
} else {
|
||||
val.ValueType = &pb.Value_KeyValue{keyToProto(v)}
|
||||
val.ValueType = &pb.Value_KeyValue{KeyValue: keyToProto(v)}
|
||||
}
|
||||
case GeoPoint:
|
||||
if !v.Valid() {
|
||||
return nil, errors.New("invalid GeoPoint value")
|
||||
}
|
||||
val.ValueType = &pb.Value_GeoPointValue{&llpb.LatLng{
|
||||
val.ValueType = &pb.Value_GeoPointValue{GeoPointValue: &llpb.LatLng{
|
||||
Latitude: v.Lat,
|
||||
Longitude: v.Lng,
|
||||
}}
|
||||
|
@ -324,7 +366,7 @@ func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
|
|||
if v.Before(minTime) || v.After(maxTime) {
|
||||
return nil, errors.New("time value out of range")
|
||||
}
|
||||
val.ValueType = &pb.Value_TimestampValue{&timepb.Timestamp{
|
||||
val.ValueType = &pb.Value_TimestampValue{TimestampValue: &timepb.Timestamp{
|
||||
Seconds: v.Unix(),
|
||||
Nanos: int32(v.Nanosecond()),
|
||||
}}
|
||||
|
@ -332,13 +374,13 @@ func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
|
|||
if len(v) > 1500 && !noIndex {
|
||||
return nil, errors.New("[]byte property too long to index")
|
||||
}
|
||||
val.ValueType = &pb.Value_BlobValue{v}
|
||||
val.ValueType = &pb.Value_BlobValue{BlobValue: v}
|
||||
case *Entity:
|
||||
e, err := propertiesToProto(v.Key, v.Properties)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
val.ValueType = &pb.Value_EntityValue{e}
|
||||
val.ValueType = &pb.Value_EntityValue{EntityValue: e}
|
||||
case []interface{}:
|
||||
arr := make([]*pb.Value, 0, len(v))
|
||||
for i, v := range v {
|
||||
|
@ -348,7 +390,7 @@ func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
|
|||
}
|
||||
arr = append(arr, elem)
|
||||
}
|
||||
val.ValueType = &pb.Value_ArrayValue{&pb.ArrayValue{arr}}
|
||||
val.ValueType = &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: arr}}
|
||||
// ArrayValues have ExcludeFromIndexes set on the individual items, rather
|
||||
// than the top-level value.
|
||||
val.ExcludeFromIndexes = false
|
||||
|
|
70
vendor/cloud.google.com/go/datastore/save_test.go
generated
vendored
|
@ -71,8 +71,8 @@ func TestSaveEntityNested(t *testing.T) {
|
|||
want *pb.Entity
|
||||
}{
|
||||
{
|
||||
"nested entity with key",
|
||||
&NestedWithKey{
|
||||
desc: "nested entity with key",
|
||||
src: &NestedWithKey{
|
||||
Y: "yyy",
|
||||
N: WithKey{
|
||||
X: "two",
|
||||
|
@ -80,17 +80,17 @@ func TestSaveEntityNested(t *testing.T) {
|
|||
K: testKey1a,
|
||||
},
|
||||
},
|
||||
testKey0,
|
||||
&pb.Entity{
|
||||
key: testKey0,
|
||||
want: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
|
||||
"Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}},
|
||||
"N": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Key: keyToProto(testKey1a),
|
||||
Properties: map[string]*pb.Value{
|
||||
"X": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
|
@ -98,8 +98,8 @@ func TestSaveEntityNested(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"nested entity with incomplete key",
|
||||
&NestedWithKey{
|
||||
desc: "nested entity with incomplete key",
|
||||
src: &NestedWithKey{
|
||||
Y: "yyy",
|
||||
N: WithKey{
|
||||
X: "two",
|
||||
|
@ -107,17 +107,17 @@ func TestSaveEntityNested(t *testing.T) {
|
|||
K: incompleteKey,
|
||||
},
|
||||
},
|
||||
testKey0,
|
||||
&pb.Entity{
|
||||
key: testKey0,
|
||||
want: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
|
||||
"Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}},
|
||||
"N": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Key: keyToProto(incompleteKey),
|
||||
Properties: map[string]*pb.Value{
|
||||
"X": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
|
@ -125,24 +125,24 @@ func TestSaveEntityNested(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"nested entity without key",
|
||||
&NestedWithoutKey{
|
||||
desc: "nested entity without key",
|
||||
src: &NestedWithoutKey{
|
||||
Y: "yyy",
|
||||
N: WithoutKey{
|
||||
X: "two",
|
||||
I: 2,
|
||||
},
|
||||
},
|
||||
testKey0,
|
||||
&pb.Entity{
|
||||
key: testKey0,
|
||||
want: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
|
||||
"Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}},
|
||||
"N": {ValueType: &pb.Value_EntityValue{
|
||||
&pb.Entity{
|
||||
EntityValue: &pb.Entity{
|
||||
Properties: map[string]*pb.Value{
|
||||
"X": {ValueType: &pb.Value_StringValue{"two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{2}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
|
@ -150,31 +150,31 @@ func TestSaveEntityNested(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
"key at top level",
|
||||
&WithKey{
|
||||
desc: "key at top level",
|
||||
src: &WithKey{
|
||||
X: "three",
|
||||
I: 3,
|
||||
K: testKey0,
|
||||
},
|
||||
testKey0,
|
||||
&pb.Entity{
|
||||
key: testKey0,
|
||||
want: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"X": {ValueType: &pb.Value_StringValue{"three"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{3}},
|
||||
"X": {ValueType: &pb.Value_StringValue{StringValue: "three"}},
|
||||
"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"nested unexported anonymous struct field",
|
||||
&UnexpAnonym{
|
||||
desc: "nested unexported anonymous struct field",
|
||||
src: &UnexpAnonym{
|
||||
a{S: "hello"},
|
||||
},
|
||||
testKey0,
|
||||
&pb.Entity{
|
||||
key: testKey0,
|
||||
want: &pb.Entity{
|
||||
Key: keyToProto(testKey0),
|
||||
Properties: map[string]*pb.Value{
|
||||
"S": {ValueType: &pb.Value_StringValue{"hello"}},
|
||||
"S": {ValueType: &pb.Value_StringValue{StringValue: "hello"}},
|
||||
},
|
||||
},
|
||||
},
6
vendor/cloud.google.com/go/datastore/transaction.go
generated
vendored
@@ -150,7 +150,7 @@ func (t *Transaction) Commit() (*Commit, error) {
}
req := &pb.CommitRequest{
ProjectId: t.client.dataset,
TransactionSelector: &pb.CommitRequest_Transaction{t.id},
TransactionSelector: &pb.CommitRequest_Transaction{Transaction: t.id},
Mutations: t.mutations,
Mode: pb.CommitRequest_TRANSACTIONAL,
}
@@ -201,7 +201,7 @@ func (t *Transaction) Rollback() error {
// or modified by this transaction.
func (t *Transaction) Get(key *Key, dst interface{}) error {
opts := &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{t.id},
ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
}
err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
if me, ok := err.(MultiError); ok {
@@ -216,7 +216,7 @@ func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error {
return errExpiredTransaction
}
opts := &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{t.id},
ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
}
return t.client.get(t.ctx, keys, dst, opts)
}
49
vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go
generated
vendored
@@ -39,10 +39,7 @@ type Controller2CallOptions struct {
func defaultController2ClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("clouddebugger.googleapis.com:443"),
option.WithScopes(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud_debugger",
),
option.WithScopes(DefaultAuthScopes()...),
}
}

@@ -60,17 +57,6 @@ func defaultController2CallOptions() *Controller2CallOptions {
})
}),
},
{"default", "non_idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &Controller2CallOptions{
RegisterDebuggee: retry[[2]string{"default", "non_idempotent"}],
@@ -91,7 +77,7 @@ type Controller2Client struct {
CallOptions *Controller2CallOptions

// The metadata to be sent with each request.
xGoogHeader string
xGoogHeader []string
}

// NewController2Client creates a new controller2 client.
@@ -147,8 +133,8 @@ func (c *Controller2Client) Close() error {
// use by Google-written clients.
func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "")
c.xGoogHeader = gax.XGoogHeader(kv...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
}
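The client changes in this file add a variadic opts ...gax.CallOption parameter to every RPC method and thread gax.CallSettings through gax.Invoke, so callers can override the default retry policy per call. A hedged sketch of how a caller might use that surface, assuming the regenerated debugger/apiv2 client from this vendor drop and Application Default Credentials; the backoff values are illustrative:

```go
// Sketch only: per-call retry override with the new gax.CallOption surface.
package main

import (
	"log"
	"time"

	debugger "cloud.google.com/go/debugger/apiv2"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	ctx := context.Background()
	c, err := debugger.NewController2Client(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Per-call retry policy, appended after the client's default CallOptions.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        10 * time.Second,
			Multiplier: 1.3,
		})
	})

	req := &clouddebuggerpb.RegisterDebuggeeRequest{
		Debuggee: &clouddebuggerpb.Debuggee{},
	}
	if _, err := c.RegisterDebuggee(ctx, req, retry); err != nil {
		log.Println(err)
	}
}
```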
|
||||
|
||||
// RegisterDebuggee registers the debuggee with the controller service.
|
||||
|
@ -161,14 +147,15 @@ func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
|
|||
// This allows the controller service to disable the agent or recover from any
|
||||
// data loss. If the debuggee is disabled by the server, the response will
|
||||
// have `is_disabled` set to `true`.
|
||||
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
|
||||
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
|
||||
var resp *clouddebuggerpb.RegisterDebuggeeResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.controller2Client.RegisterDebuggee(ctx, req)
|
||||
resp, err = c.controller2Client.RegisterDebuggee(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.RegisterDebuggee...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -188,14 +175,15 @@ func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebu
|
|||
// Moreover, an agent should remember the breakpoints that are completed
|
||||
// until the controller removes them from the active list to avoid
|
||||
// setting those breakpoints again.
|
||||
func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
|
||||
func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...)
|
||||
var resp *clouddebuggerpb.ListActiveBreakpointsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.controller2Client.ListActiveBreakpoints(ctx, req)
|
||||
resp, err = c.controller2Client.ListActiveBreakpoints(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.ListActiveBreakpoints...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -211,14 +199,15 @@ func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clou
|
|||
// `condition` and `expression` fields should not alter the breakpoint
|
||||
// semantics. These may only make changes such as canonicalizing a value
|
||||
// or snapping the location to the correct line of code.
|
||||
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
|
||||
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...)
|
||||
var resp *clouddebuggerpb.UpdateActiveBreakpointResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.controller2Client.UpdateActiveBreakpoint(ctx, req)
|
||||
resp, err = c.controller2Client.UpdateActiveBreakpoint(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.UpdateActiveBreakpoint...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
67
vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go
generated
vendored
|
@ -41,10 +41,7 @@ type Debugger2CallOptions struct {
|
|||
func defaultDebugger2ClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("clouddebugger.googleapis.com:443"),
|
||||
option.WithScopes(
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud_debugger",
|
||||
),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -62,17 +59,6 @@ func defaultDebugger2CallOptions() *Debugger2CallOptions {
|
|||
})
|
||||
}),
|
||||
},
|
||||
{"default", "non_idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &Debugger2CallOptions{
|
||||
SetBreakpoint: retry[[2]string{"default", "non_idempotent"}],
|
||||
|
@ -95,7 +81,7 @@ type Debugger2Client struct {
|
|||
CallOptions *Debugger2CallOptions
|
||||
|
||||
// The metadata to be sent with each request.
|
||||
xGoogHeader string
|
||||
xGoogHeader []string
|
||||
}
|
||||
|
||||
// NewDebugger2Client creates a new debugger2 client.
|
||||
|
@ -143,19 +129,20 @@ func (c *Debugger2Client) Close() error {
|
|||
// use by Google-written clients.
|
||||
func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "")
|
||||
c.xGoogHeader = gax.XGoogHeader(kv...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
|
||||
}
|
||||
|
||||
// SetBreakpoint sets the breakpoint to the debuggee.
|
||||
func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest) (*clouddebuggerpb.SetBreakpointResponse, error) {
|
||||
func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...)
|
||||
var resp *clouddebuggerpb.SetBreakpointResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.debugger2Client.SetBreakpoint(ctx, req)
|
||||
resp, err = c.debugger2Client.SetBreakpoint(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.SetBreakpoint...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -163,14 +150,15 @@ func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerp
|
|||
}
|
||||
|
||||
// GetBreakpoint gets breakpoint information.
|
||||
func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest) (*clouddebuggerpb.GetBreakpointResponse, error) {
|
||||
func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...)
|
||||
var resp *clouddebuggerpb.GetBreakpointResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.debugger2Client.GetBreakpoint(ctx, req)
|
||||
resp, err = c.debugger2Client.GetBreakpoint(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.GetBreakpoint...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -178,25 +166,27 @@ func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerp
|
|||
}
|
||||
|
||||
// DeleteBreakpoint deletes the breakpoint from the debuggee.
|
||||
func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest) error {
|
||||
func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.debugger2Client.DeleteBreakpoint(ctx, req)
|
||||
_, err = c.debugger2Client.DeleteBreakpoint(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.DeleteBreakpoint...)
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// ListBreakpoints lists all breakpoints for the debuggee.
|
||||
func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest) (*clouddebuggerpb.ListBreakpointsResponse, error) {
|
||||
func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...)
|
||||
var resp *clouddebuggerpb.ListBreakpointsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.debugger2Client.ListBreakpoints(ctx, req)
|
||||
resp, err = c.debugger2Client.ListBreakpoints(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.ListBreakpoints...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -204,14 +194,15 @@ func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebugge
|
|||
}
|
||||
|
||||
// ListDebuggees lists all the debuggees that the user can set breakpoints to.
|
||||
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest) (*clouddebuggerpb.ListDebuggeesResponse, error) {
|
||||
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)
|
||||
var resp *clouddebuggerpb.ListDebuggeesResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.debugger2Client.ListDebuggees(ctx, req)
|
||||
resp, err = c.debugger2Client.ListDebuggees(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.ListDebuggees...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
19
vendor/cloud.google.com/go/debugger/apiv2/doc.go
generated
vendored
|
@@ -15,7 +15,7 @@
// AUTO-GENERATED CODE. DO NOT EDIT.

// Package debugger is an experimental, auto-generated package for the
// debugger API.
// Stackdriver Debugger API.
//
// Examines the call stack and variables of a running application
// without stopping or slowing it down.
@@ -28,9 +28,18 @@ import (
"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val string) context.Context {
md, _ := metadata.FromContext(ctx)
func insertXGoog(ctx context.Context, val []string) context.Context {
md, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy()
md["x-goog-api-client"] = []string{val}
return metadata.NewContext(ctx, md)
md["x-goog-api-client"] = val
return metadata.NewOutgoingContext(ctx, md)
}

// DefaultAuthScopes reports the authentication scopes required
// by this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud_debugger",
}
}
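insertXGoog now attaches the x-goog-api-client header through the outgoing-metadata API (FromOutgoingContext/NewOutgoingContext) and carries the value as a []string. A standalone sketch of that pattern; the x-custom-header key is purely illustrative:

```go
// Sketch of the outgoing-metadata pattern the regenerated doc.go files use.
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// withHeader copies any metadata already destined for the server, sets a
// header, and returns a context carrying the merged outgoing metadata.
func withHeader(ctx context.Context, key string, vals []string) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx) // ok if no metadata is present yet
	md = md.Copy()
	md[key] = vals
	return metadata.NewOutgoingContext(ctx, md)
}

func main() {
	ctx := withHeader(context.Background(), "x-custom-header", []string{"demo/1.0"})
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-custom-header"])
}
```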
|
||||
|
|
122
vendor/cloud.google.com/go/debugger/apiv2/mock_test.go
generated
vendored
|
@ -17,16 +17,18 @@
|
|||
package debugger
|
||||
|
||||
import (
|
||||
google_protobuf "github.com/golang/protobuf/ptypes/empty"
|
||||
emptypb "github.com/golang/protobuf/ptypes/empty"
|
||||
clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
|
||||
)
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
@ -36,6 +38,8 @@ import (
|
|||
status "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
gstatus "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var _ = io.EOF
|
||||
|
@ -57,7 +61,11 @@ type mockDebugger2Server struct {
|
|||
resps []proto.Message
|
||||
}
|
||||
|
||||
func (s *mockDebugger2Server) SetBreakpoint(_ context.Context, req *clouddebuggerpb.SetBreakpointRequest) (*clouddebuggerpb.SetBreakpointResponse, error) {
|
||||
func (s *mockDebugger2Server) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest) (*clouddebuggerpb.SetBreakpointResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -65,7 +73,11 @@ func (s *mockDebugger2Server) SetBreakpoint(_ context.Context, req *clouddebugge
|
|||
return s.resps[0].(*clouddebuggerpb.SetBreakpointResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockDebugger2Server) GetBreakpoint(_ context.Context, req *clouddebuggerpb.GetBreakpointRequest) (*clouddebuggerpb.GetBreakpointResponse, error) {
|
||||
func (s *mockDebugger2Server) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest) (*clouddebuggerpb.GetBreakpointResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -73,15 +85,23 @@ func (s *mockDebugger2Server) GetBreakpoint(_ context.Context, req *clouddebugge
|
|||
return s.resps[0].(*clouddebuggerpb.GetBreakpointResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockDebugger2Server) DeleteBreakpoint(_ context.Context, req *clouddebuggerpb.DeleteBreakpointRequest) (*google_protobuf.Empty, error) {
|
||||
func (s *mockDebugger2Server) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest) (*emptypb.Empty, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
}
|
||||
return s.resps[0].(*google_protobuf.Empty), nil
|
||||
return s.resps[0].(*emptypb.Empty), nil
|
||||
}
|
||||
|
||||
func (s *mockDebugger2Server) ListBreakpoints(_ context.Context, req *clouddebuggerpb.ListBreakpointsRequest) (*clouddebuggerpb.ListBreakpointsResponse, error) {
|
||||
func (s *mockDebugger2Server) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest) (*clouddebuggerpb.ListBreakpointsResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -89,7 +109,11 @@ func (s *mockDebugger2Server) ListBreakpoints(_ context.Context, req *clouddebug
|
|||
return s.resps[0].(*clouddebuggerpb.ListBreakpointsResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockDebugger2Server) ListDebuggees(_ context.Context, req *clouddebuggerpb.ListDebuggeesRequest) (*clouddebuggerpb.ListDebuggeesResponse, error) {
|
||||
func (s *mockDebugger2Server) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest) (*clouddebuggerpb.ListDebuggeesResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -112,7 +136,11 @@ type mockController2Server struct {
|
|||
resps []proto.Message
|
||||
}
|
||||
|
||||
func (s *mockController2Server) RegisterDebuggee(_ context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
|
||||
func (s *mockController2Server) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -120,7 +148,11 @@ func (s *mockController2Server) RegisterDebuggee(_ context.Context, req *cloudde
|
|||
return s.resps[0].(*clouddebuggerpb.RegisterDebuggeeResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockController2Server) ListActiveBreakpoints(_ context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
|
||||
func (s *mockController2Server) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -128,7 +160,11 @@ func (s *mockController2Server) ListActiveBreakpoints(_ context.Context, req *cl
|
|||
return s.resps[0].(*clouddebuggerpb.ListActiveBreakpointsResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockController2Server) UpdateActiveBreakpoint(_ context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
|
||||
func (s *mockController2Server) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -205,8 +241,8 @@ func TestDebugger2SetBreakpoint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDebugger2SetBreakpointError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockDebugger2.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockDebugger2.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var debuggeeId string = "debuggeeId-997255898"
|
||||
var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{}
|
||||
|
@ -224,7 +260,9 @@ func TestDebugger2SetBreakpointError(t *testing.T) {
|
|||
|
||||
resp, err := c.SetBreakpoint(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -267,8 +305,8 @@ func TestDebugger2GetBreakpoint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDebugger2GetBreakpointError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockDebugger2.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockDebugger2.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var debuggeeId string = "debuggeeId-997255898"
|
||||
var breakpointId string = "breakpointId498424873"
|
||||
|
@ -286,13 +324,15 @@ func TestDebugger2GetBreakpointError(t *testing.T) {
|
|||
|
||||
resp, err := c.GetBreakpoint(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestDebugger2DeleteBreakpoint(t *testing.T) {
|
||||
var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{}
|
||||
var expectedResponse *emptypb.Empty = &emptypb.Empty{}
|
||||
|
||||
mockDebugger2.err = nil
|
||||
mockDebugger2.reqs = nil
|
||||
|
@ -326,8 +366,8 @@ func TestDebugger2DeleteBreakpoint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDebugger2DeleteBreakpointError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockDebugger2.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockDebugger2.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var debuggeeId string = "debuggeeId-997255898"
|
||||
var breakpointId string = "breakpointId498424873"
|
||||
|
@ -345,7 +385,9 @@ func TestDebugger2DeleteBreakpointError(t *testing.T) {
|
|||
|
||||
err = c.DeleteBreakpoint(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
}
|
||||
|
@ -388,8 +430,8 @@ func TestDebugger2ListBreakpoints(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDebugger2ListBreakpointsError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockDebugger2.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockDebugger2.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var debuggeeId string = "debuggeeId-997255898"
|
||||
var clientVersion string = "clientVersion-1506231196"
|
||||
|
@ -405,7 +447,9 @@ func TestDebugger2ListBreakpointsError(t *testing.T) {
|
|||
|
||||
resp, err := c.ListBreakpoints(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -446,8 +490,8 @@ func TestDebugger2ListDebuggees(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDebugger2ListDebuggeesError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockDebugger2.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockDebugger2.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var project string = "project-309310695"
|
||||
var clientVersion string = "clientVersion-1506231196"
|
||||
|
@ -463,7 +507,9 @@ func TestDebugger2ListDebuggeesError(t *testing.T) {
|
|||
|
||||
resp, err := c.ListDebuggees(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -502,8 +548,8 @@ func TestController2RegisterDebuggee(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestController2RegisterDebuggeeError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockController2.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockController2.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{}
|
||||
var request = &clouddebuggerpb.RegisterDebuggeeRequest{
|
||||
|
@ -517,7 +563,9 @@ func TestController2RegisterDebuggeeError(t *testing.T) {
|
|||
|
||||
resp, err := c.RegisterDebuggee(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -561,8 +609,8 @@ func TestController2ListActiveBreakpoints(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestController2ListActiveBreakpointsError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockController2.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockController2.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var debuggeeId string = "debuggeeId-997255898"
|
||||
var request = &clouddebuggerpb.ListActiveBreakpointsRequest{
|
||||
|
@ -576,7 +624,9 @@ func TestController2ListActiveBreakpointsError(t *testing.T) {
|
|||
|
||||
resp, err := c.ListActiveBreakpoints(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -617,8 +667,8 @@ func TestController2UpdateActiveBreakpoint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestController2UpdateActiveBreakpointError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockController2.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockController2.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var debuggeeId string = "debuggeeId-997255898"
|
||||
var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{}
|
||||
|
@ -634,7 +684,9 @@ func TestController2UpdateActiveBreakpointError(t *testing.T) {
|
|||
|
||||
resp, err := c.UpdateActiveBreakpoint(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
|
86
vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,86 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package errorreporting

import (
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

import (
	"strconv"
	"testing"
	"time"

	"cloud.google.com/go/internal/testutil"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now

func TestReportErrorsServiceSmoke(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping smoke test in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	projectId := testutil.ProjID()
	_ = projectId

	c, err := NewReportErrorsClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}

	var formattedProjectName string = ReportErrorsProjectPath(projectId)
	var message string = "[MESSAGE]"
	var service string = "[SERVICE]"
	var serviceContext = &clouderrorreportingpb.ServiceContext{
		Service: service,
	}
	var filePath string = "path/to/file.lang"
	var lineNumber int32 = 42
	var functionName string = "meaningOfLife"
	var reportLocation = &clouderrorreportingpb.SourceLocation{
		FilePath:     filePath,
		LineNumber:   lineNumber,
		FunctionName: functionName,
	}
	var context = &clouderrorreportingpb.ErrorContext{
		ReportLocation: reportLocation,
	}
	var event = &clouderrorreportingpb.ReportedErrorEvent{
		Message:        message,
		ServiceContext: serviceContext,
		Context:        context,
	}
	var request = &clouderrorreportingpb.ReportErrorEventRequest{
		ProjectName: formattedProjectName,
		Event:       event,
	}

	if _, err := c.ReportErrorEvent(ctx, request); err != nil {
		t.Error(err)
	}
}
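The regenerated mock tests elsewhere in this update (both the debugger and errorreporting ones) stop using the deprecated grpc.Errorf/grpc.Code helpers and go through google.golang.org/grpc/status instead, comparing codes via status.FromError. A minimal sketch of that error-handling pattern, with no assumptions beyond the grpc status and codes packages:

```go
// Sketch of the status-package error handling the regenerated tests switch to.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// A server (or mock) would normally produce this; status.Error builds an
	// error carrying a gRPC code and message.
	err := status.Error(codes.PermissionDenied, "test error")

	// FromError extracts the *Status if err was produced by the status package.
	if st, ok := status.FromError(err); !ok {
		fmt.Println("not a gRPC status error:", err)
	} else if st.Code() == codes.PermissionDenied {
		fmt.Println("permission denied:", st.Message())
	}
}
```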
|
18
vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go
generated
vendored
|
@@ -15,7 +15,7 @@
// AUTO-GENERATED CODE. DO NOT EDIT.

// Package errorreporting is an experimental, auto-generated package for the
// errorreporting API.
// Stackdriver Error Reporting API.
//
// Stackdriver Error Reporting groups and counts similar errors from cloud
// services. The Stackdriver Error Reporting API provides a way to report new
@@ -27,9 +27,17 @@ import (
"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val string) context.Context {
md, _ := metadata.FromContext(ctx)
func insertXGoog(ctx context.Context, val []string) context.Context {
md, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy()
md["x-goog-api-client"] = []string{val}
return metadata.NewContext(ctx, md)
md["x-goog-api-client"] = val
return metadata.NewOutgoingContext(ctx, md)
}

// DefaultAuthScopes reports the authentication scopes required
// by this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
|
||||
|
|
28
vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go
generated
vendored
|
@ -42,9 +42,7 @@ type ErrorGroupCallOptions struct {
|
|||
func defaultErrorGroupClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("clouderrorreporting.googleapis.com:443"),
|
||||
option.WithScopes(
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -81,7 +79,7 @@ type ErrorGroupClient struct {
|
|||
CallOptions *ErrorGroupCallOptions
|
||||
|
||||
// The metadata to be sent with each request.
|
||||
xGoogHeader string
|
||||
xGoogHeader []string
|
||||
}
|
||||
|
||||
// NewErrorGroupClient creates a new error group service client.
|
||||
|
@ -118,8 +116,8 @@ func (c *ErrorGroupClient) Close() error {
|
|||
// use by Google-written clients.
|
||||
func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "")
|
||||
c.xGoogHeader = gax.XGoogHeader(kv...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
|
||||
}
|
||||
|
||||
// ErrorGroupGroupPath returns the path for the group resource.
|
||||
|
@ -135,14 +133,15 @@ func ErrorGroupGroupPath(project, group string) string {
|
|||
}
|
||||
|
||||
// GetGroup get the specified group.
|
||||
func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) {
|
||||
func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
|
||||
var resp *clouderrorreportingpb.ErrorGroup
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.errorGroupClient.GetGroup(ctx, req)
|
||||
resp, err = c.errorGroupClient.GetGroup(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.GetGroup...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -151,14 +150,15 @@ func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportin
|
|||
|
||||
// UpdateGroup replace the data for the specified group.
|
||||
// Fails if the group does not exist.
|
||||
func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) {
|
||||
func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
|
||||
var resp *clouderrorreportingpb.ErrorGroup
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.errorGroupClient.UpdateGroup(ctx, req)
|
||||
resp, err = c.errorGroupClient.UpdateGroup(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.UpdateGroup...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
37
vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go
generated
vendored
|
@ -45,9 +45,7 @@ type ErrorStatsCallOptions struct {
|
|||
func defaultErrorStatsClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("clouderrorreporting.googleapis.com:443"),
|
||||
option.WithScopes(
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -85,7 +83,7 @@ type ErrorStatsClient struct {
|
|||
CallOptions *ErrorStatsCallOptions
|
||||
|
||||
// The metadata to be sent with each request.
|
||||
xGoogHeader string
|
||||
xGoogHeader []string
|
||||
}
|
||||
|
||||
// NewErrorStatsClient creates a new error stats service client.
|
||||
|
@ -123,8 +121,8 @@ func (c *ErrorStatsClient) Close() error {
|
|||
// use by Google-written clients.
|
||||
func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "")
|
||||
c.xGoogHeader = gax.XGoogHeader(kv...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
|
||||
}
|
||||
|
||||
// ErrorStatsProjectPath returns the path for the project resource.
|
||||
|
@ -139,8 +137,9 @@ func ErrorStatsProjectPath(project string) string {
|
|||
}
|
||||
|
||||
// ListGroupStats lists the specified groups.
|
||||
func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) *ErrorGroupStatsIterator {
|
||||
func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...)
|
||||
it := &ErrorGroupStatsIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) {
|
||||
var resp *clouderrorreportingpb.ListGroupStatsResponse
|
||||
|
@ -150,11 +149,11 @@ func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorre
|
|||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.errorStatsClient.ListGroupStats(ctx, req)
|
||||
resp, err = c.errorStatsClient.ListGroupStats(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.ListGroupStats...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
@ -173,8 +172,9 @@ func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorre
|
|||
}
|
||||
|
||||
// ListEvents lists the specified events.
|
||||
func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest) *ErrorEventIterator {
|
||||
func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...)
|
||||
it := &ErrorEventIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) {
|
||||
var resp *clouderrorreportingpb.ListEventsResponse
|
||||
|
@ -184,11 +184,11 @@ func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreport
|
|||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.errorStatsClient.ListEvents(ctx, req)
|
||||
resp, err = c.errorStatsClient.ListEvents(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.ListEvents...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
@ -207,14 +207,15 @@ func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreport
|
|||
}
|
||||
|
||||
// DeleteEvents deletes all error events of a given project.
|
||||
func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) {
|
||||
func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...)
|
||||
var resp *clouderrorreportingpb.DeleteEventsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.errorStatsClient.DeleteEvents(ctx, req)
|
||||
resp, err = c.errorStatsClient.DeleteEvents(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.DeleteEvents...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@ package errorreporting_test
|
|||
import (
|
||||
"cloud.google.com/go/errorreporting/apiv1beta1"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
|
||||
)
|
||||
|
||||
|
@ -45,9 +46,11 @@ func ExampleErrorStatsClient_ListGroupStats() {
|
|||
it := c.ListGroupStats(ctx, req)
|
||||
for {
|
||||
resp, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
break
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
|
@ -67,9 +70,11 @@ func ExampleErrorStatsClient_ListEvents() {
|
|||
it := c.ListEvents(ctx, req)
|
||||
for {
|
||||
resp, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
break
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
|
|
88
vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go
generated
vendored
|
@ -22,10 +22,12 @@ import (
|
|||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
@ -35,6 +37,8 @@ import (
|
|||
status "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
gstatus "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var _ = io.EOF
|
||||
|
@ -56,7 +60,11 @@ type mockErrorGroupServer struct {
|
|||
resps []proto.Message
|
||||
}
|
||||
|
||||
func (s *mockErrorGroupServer) GetGroup(_ context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) {
|
||||
func (s *mockErrorGroupServer) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -64,7 +72,11 @@ func (s *mockErrorGroupServer) GetGroup(_ context.Context, req *clouderrorreport
|
|||
return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil
|
||||
}
|
||||
|
||||
func (s *mockErrorGroupServer) UpdateGroup(_ context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) {
|
||||
func (s *mockErrorGroupServer) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -87,7 +99,11 @@ type mockErrorStatsServer struct {
|
|||
resps []proto.Message
|
||||
}
|
||||
|
||||
func (s *mockErrorStatsServer) ListGroupStats(_ context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) (*clouderrorreportingpb.ListGroupStatsResponse, error) {
|
||||
func (s *mockErrorStatsServer) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) (*clouderrorreportingpb.ListGroupStatsResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -95,7 +111,11 @@ func (s *mockErrorStatsServer) ListGroupStats(_ context.Context, req *clouderror
|
|||
return s.resps[0].(*clouderrorreportingpb.ListGroupStatsResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockErrorStatsServer) ListEvents(_ context.Context, req *clouderrorreportingpb.ListEventsRequest) (*clouderrorreportingpb.ListEventsResponse, error) {
|
||||
func (s *mockErrorStatsServer) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest) (*clouderrorreportingpb.ListEventsResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -103,7 +123,11 @@ func (s *mockErrorStatsServer) ListEvents(_ context.Context, req *clouderrorrepo
|
|||
return s.resps[0].(*clouderrorreportingpb.ListEventsResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockErrorStatsServer) DeleteEvents(_ context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) {
|
||||
func (s *mockErrorStatsServer) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -126,7 +150,11 @@ type mockReportErrorsServer struct {
|
|||
resps []proto.Message
|
||||
}
|
||||
|
||||
func (s *mockReportErrorsServer) ReportErrorEvent(_ context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
|
||||
func (s *mockReportErrorsServer) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -206,8 +234,8 @@ func TestErrorGroupServiceGetGroup(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestErrorGroupServiceGetGroupError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockErrorGroup.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockErrorGroup.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]")
|
||||
var request = &clouderrorreportingpb.GetGroupRequest{
|
||||
|
@ -221,7 +249,9 @@ func TestErrorGroupServiceGetGroupError(t *testing.T) {
|
|||
|
||||
resp, err := c.GetGroup(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -265,8 +295,8 @@ func TestErrorGroupServiceUpdateGroup(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestErrorGroupServiceUpdateGroupError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockErrorGroup.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockErrorGroup.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{}
|
||||
var request = &clouderrorreportingpb.UpdateGroupRequest{
|
||||
|
@ -280,7 +310,9 @@ func TestErrorGroupServiceUpdateGroupError(t *testing.T) {
|
|||
|
||||
resp, err := c.UpdateGroup(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -337,8 +369,8 @@ func TestErrorStatsServiceListGroupStats(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestErrorStatsServiceListGroupStatsError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockErrorStats.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockErrorStats.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
|
||||
var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{}
|
||||
|
@ -354,7 +386,9 @@ func TestErrorStatsServiceListGroupStatsError(t *testing.T) {
|
|||
|
||||
resp, err := c.ListGroupStats(context.Background(), request).Next()
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -411,8 +445,8 @@ func TestErrorStatsServiceListEvents(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestErrorStatsServiceListEventsError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockErrorStats.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockErrorStats.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
|
||||
var groupId string = "groupId506361563"
|
||||
|
@ -428,7 +462,9 @@ func TestErrorStatsServiceListEventsError(t *testing.T) {
|
|||
|
||||
resp, err := c.ListEvents(context.Background(), request).Next()
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -467,8 +503,8 @@ func TestErrorStatsServiceDeleteEvents(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestErrorStatsServiceDeleteEventsError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockErrorStats.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockErrorStats.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
|
||||
var request = &clouderrorreportingpb.DeleteEventsRequest{
|
||||
|
@@ -482,7 +518,9 @@ func TestErrorStatsServiceDeleteEventsError(t *testing.T) {
|
|||
|
||||
resp, err := c.DeleteEvents(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@@ -523,8 +561,8 @@ func TestReportErrorsServiceReportErrorEvent(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestReportErrorsServiceReportErrorEventError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockReportErrors.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockReportErrors.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]")
|
||||
var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{}
|
||||
|
@@ -540,7 +578,9 @@ func TestReportErrorsServiceReportErrorEventError(t *testing.T) {
|
|||
|
||||
resp, err := c.ReportErrorEvent(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
|
36 vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go generated vendored
|
@@ -17,8 +17,6 @@
|
|||
package errorreporting
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
|
@@ -26,7 +24,6 @@ import (
|
|||
"google.golang.org/api/transport"
|
||||
clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@@ -41,26 +38,12 @@ type ReportErrorsCallOptions struct {
|
|||
func defaultReportErrorsClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("clouderrorreporting.googleapis.com:443"),
|
||||
option.WithScopes(
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultReportErrorsCallOptions() *ReportErrorsCallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "non_idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
retry := map[[2]string][]gax.CallOption{}
|
||||
return &ReportErrorsCallOptions{
|
||||
ReportErrorEvent: retry[[2]string{"default", "non_idempotent"}],
|
||||
}
|
||||
|
@@ -78,7 +61,7 @@ type ReportErrorsClient struct {
|
|||
CallOptions *ReportErrorsCallOptions
|
||||
|
||||
// The metadata to be sent with each request.
|
||||
xGoogHeader string
|
||||
xGoogHeader []string
|
||||
}
|
||||
|
||||
// NewReportErrorsClient creates a new report errors service client.
|
||||
|
@@ -115,8 +98,8 @@ func (c *ReportErrorsClient) Close() error {
|
|||
// use by Google-written clients.
|
||||
func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "")
|
||||
c.xGoogHeader = gax.XGoogHeader(kv...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
|
||||
}
|
||||
|
||||
// ReportErrorsProjectPath returns the path for the project resource.
|
||||
|
@@ -138,14 +121,15 @@ func ReportErrorsProjectPath(project string) string {
|
|||
// for authentication. To use an API key, append it to the URL as the value of
|
||||
// a `key` parameter. For example:
|
||||
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
|
||||
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
|
||||
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)
|
||||
var resp *clouderrorreportingpb.ReportErrorEventResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.reportErrorsClient.ReportErrorEvent(ctx, req)
|
||||
resp, err = c.reportErrorsClient.ReportErrorEvent(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, c.CallOptions.ReportErrorEvent...)
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
4 vendor/cloud.google.com/go/errors/error_logging_test.go generated vendored
|
@@ -39,6 +39,10 @@ func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeLogger) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newTestClientUsingLogging(c *fakeLogger) *Client {
|
||||
newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
|
||||
return c, nil
|
||||
|
|
35 vendor/cloud.google.com/go/errors/errors.go generated vendored
|
@@ -91,6 +91,7 @@ import (
|
|||
"cloud.google.com/go/internal/version"
|
||||
"cloud.google.com/go/logging"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
|
||||
|
@@ -101,7 +102,8 @@ const (
|
|||
)
|
||||
|
||||
type apiInterface interface {
|
||||
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest) (*erpb.ReportErrorEventResponse, error)
|
||||
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
|
||||
Close() error
|
||||
}
|
||||
|
||||
var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
|
||||
|
@@ -115,6 +117,16 @@ var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (ap
|
|||
|
||||
type loggerInterface interface {
|
||||
LogSync(ctx context.Context, e logging.Entry) error
|
||||
Close() error
|
||||
}
|
||||
|
||||
type logger struct {
|
||||
*logging.Logger
|
||||
c *logging.Client
|
||||
}
|
||||
|
||||
func (l logger) Close() error {
|
||||
return l.c.Close()
|
||||
}
|
||||
|
||||
var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
|
||||
|
@@ -123,11 +135,12 @@ var newLoggerInterface = func(ctx context.Context, projectID string, opts ...opt
|
|||
return nil, fmt.Errorf("creating Logging client: %v", err)
|
||||
}
|
||||
l := lc.Logger("errorreports")
|
||||
return l, nil
|
||||
return logger{l, lc}, nil
|
||||
}
|
||||
|
||||
type sender interface {
|
||||
send(ctx context.Context, r *http.Request, message string)
|
||||
close() error
|
||||
}
|
||||
|
||||
// errorApiSender sends error reports using the Stackdriver Error Reporting API.
|
||||
|
@@ -142,6 +155,7 @@ type loggingSender struct {
|
|||
logger loggerInterface
|
||||
projectID string
|
||||
serviceContext map[string]string
|
||||
client *logging.Client
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
|
@@ -193,6 +207,15 @@ func NewClient(ctx context.Context, projectID, serviceName, serviceVersion strin
|
|||
}
|
||||
}
|
||||
|
||||
// Close closes any resources held by the client.
|
||||
// Close should be called when the client is no longer needed.
|
||||
// It need not be called at program exit.
|
||||
func (c *Client) Close() error {
|
||||
err := c.sender.close()
|
||||
c.sender = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// An Option is an optional argument to Catch.
|
||||
type Option interface {
|
||||
isOption()
|
||||
|
@@ -359,6 +382,10 @@ func (s *loggingSender) send(ctx context.Context, r *http.Request, message strin
|
|||
}
|
||||
}
|
||||
|
||||
func (s *loggingSender) close() error {
|
||||
return s.client.Close()
|
||||
}
|
||||
|
||||
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
|
||||
time := time.Now()
|
||||
var errorContext *erpb.ErrorContext
|
||||
|
@@ -391,6 +418,10 @@ func (s *errorApiSender) send(ctx context.Context, r *http.Request, message stri
|
|||
}
|
||||
}
|
||||
|
||||
func (s *errorApiSender) close() error {
|
||||
return s.apiClient.Close()
|
||||
}
|
||||
|
||||
// chopStack trims a stack trace so that the function which panics or calls
|
||||
// Report is first.
|
||||
func chopStack(s []byte, isPanic bool) string {
|
||||
|
|
8 vendor/cloud.google.com/go/errors/errors_test.go generated vendored
|
@ -21,6 +21,8 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
gax "github.com/googleapis/gax-go"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
|
||||
|
@@ -33,7 +35,7 @@ type fakeReportErrorsClient struct {
|
|||
fail bool
|
||||
}
|
||||
|
||||
func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest) (*erpb.ReportErrorEventResponse, error) {
|
||||
func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
|
||||
if c.fail {
|
||||
return nil, errors.New("request failed")
|
||||
}
|
||||
|
@@ -41,6 +43,10 @@ func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb
|
|||
return &erpb.ReportErrorEventResponse{}, nil
|
||||
}
|
||||
|
||||
func (c *fakeReportErrorsClient) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newTestClient(c *fakeReportErrorsClient) *Client {
|
||||
newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
|
||||
return c, nil
|
||||
|
|
93 vendor/cloud.google.com/go/examples/bigquery/concat_table/main.go generated vendored
|
@@ -1,93 +0,0 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// concat_table is an example client of the bigquery client library.
|
||||
// It concatenates two BigQuery tables and writes the result to another table.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigquery"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
project = flag.String("project", "", "The ID of a Google Cloud Platform project")
|
||||
dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
|
||||
src1 = flag.String("src1", "", "The ID of the first BigQuery table to concatenate")
|
||||
src2 = flag.String("src2", "", "The ID of the second BigQuery table to concatenate")
|
||||
dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to")
|
||||
pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
flagsOk := true
|
||||
for _, f := range []string{"project", "dataset", "src1", "src2", "dest"} {
|
||||
if flag.Lookup(f).Value.String() == "" {
|
||||
fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
|
||||
flagsOk = false
|
||||
}
|
||||
}
|
||||
if !flagsOk {
|
||||
os.Exit(1)
|
||||
}
|
||||
if *src1 == *src2 || *src1 == *dest || *src2 == *dest {
|
||||
log.Fatalf("Different values must be supplied for each of --src1, --src2 and --dest")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, *project)
|
||||
if err != nil {
|
||||
log.Fatalf("Creating bigquery client: %v", err)
|
||||
}
|
||||
|
||||
s1 := client.Dataset(*dataset).Table(*src1)
|
||||
s2 := client.Dataset(*dataset).Table(*src2)
|
||||
d := client.Dataset(*dataset).Table(*dest)
|
||||
|
||||
// Concatenate data.
|
||||
copier := d.CopierFrom(s1, s2)
|
||||
copier.WriteDisposition = bigquery.WriteTruncate
|
||||
job, err := copier.Run(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Concatenating: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Job for concatenation operation: %+v\n", job)
|
||||
fmt.Printf("Waiting for job to complete.\n")
|
||||
|
||||
for range time.Tick(*pollint) {
|
||||
status, err := job.Status(ctx)
|
||||
if err != nil {
|
||||
fmt.Printf("Failure determining status: %v", err)
|
||||
break
|
||||
}
|
||||
if !status.Done() {
|
||||
continue
|
||||
}
|
||||
if err := status.Err(); err == nil {
|
||||
fmt.Printf("Success\n")
|
||||
} else {
|
||||
fmt.Printf("Failure: %+v\n", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
95 vendor/cloud.google.com/go/examples/bigquery/load/main.go generated vendored
|
@@ -1,95 +0,0 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// load is an example client of the bigquery client library.
|
||||
// It loads a file from Google Cloud Storage into a BigQuery table.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigquery"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
project = flag.String("project", "", "The ID of a Google Cloud Platform project")
|
||||
dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
|
||||
table = flag.String("table", "", "The ID of a BigQuery table to load data into")
|
||||
bucket = flag.String("bucket", "", "The name of a Google Cloud Storage bucket to load data from")
|
||||
object = flag.String("object", "", "The name of a Google Cloud Storage object to load data from. Must exist within the bucket specified by --bucket")
|
||||
skiprows = flag.Int64("skiprows", 0, "The number of rows of the source data to skip when loading")
|
||||
pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
flagsOk := true
|
||||
for _, f := range []string{"project", "dataset", "table", "bucket", "object"} {
|
||||
if flag.Lookup(f).Value.String() == "" {
|
||||
fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
|
||||
flagsOk = false
|
||||
}
|
||||
}
|
||||
if !flagsOk {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, *project)
|
||||
if err != nil {
|
||||
log.Fatalf("Creating bigquery client: %v", err)
|
||||
}
|
||||
|
||||
table := client.Dataset(*dataset).Table(*table)
|
||||
|
||||
gcs := bigquery.NewGCSReference(fmt.Sprintf("gs://%s/%s", *bucket, *object))
|
||||
gcs.SkipLeadingRows = *skiprows
|
||||
gcs.MaxBadRecords = 1
|
||||
gcs.AllowQuotedNewlines = true
|
||||
|
||||
// Load data from Google Cloud Storage into a BigQuery table.
|
||||
loader := table.LoaderFrom(gcs)
|
||||
loader.WriteDisposition = bigquery.WriteTruncate
|
||||
job, err := loader.Run(ctx)
|
||||
|
||||
if err != nil {
|
||||
log.Fatalf("Loading data: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Job for data load operation: %+v\n", job)
|
||||
fmt.Printf("Waiting for job to complete.\n")
|
||||
|
||||
for range time.Tick(*pollint) {
|
||||
status, err := job.Status(ctx)
|
||||
if err != nil {
|
||||
fmt.Printf("Failure determining status: %v", err)
|
||||
break
|
||||
}
|
||||
if !status.Done() {
|
||||
continue
|
||||
}
|
||||
if err := status.Err(); err == nil {
|
||||
fmt.Printf("Success\n")
|
||||
} else {
|
||||
fmt.Printf("Failure: %+v\n", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
98 vendor/cloud.google.com/go/examples/bigquery/query/main.go generated vendored
|
@@ -1,98 +0,0 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// query is an example client of the bigquery client library.
|
||||
// It submits a query and writes the result to a table.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigquery"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
project = flag.String("project", "", "The ID of a Google Cloud Platform project")
|
||||
dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
|
||||
q = flag.String("q", "", "The query string")
|
||||
dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to. If unset, an ephemeral table ID will be generated.")
|
||||
pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status")
|
||||
wait = flag.Bool("wait", false, "Whether to wait for the query job to complete.")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
flagsOk := true
|
||||
for _, f := range []string{"project", "dataset", "q"} {
|
||||
if flag.Lookup(f).Value.String() == "" {
|
||||
fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f)
|
||||
flagsOk = false
|
||||
}
|
||||
}
|
||||
if !flagsOk {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, *project)
|
||||
if err != nil {
|
||||
log.Fatalf("Creating bigquery client: %v", err)
|
||||
}
|
||||
|
||||
query := client.Query(*q)
|
||||
query.DefaultProjectID = *project
|
||||
query.DefaultDatasetID = *dataset
|
||||
query.WriteDisposition = bigquery.WriteTruncate
|
||||
|
||||
if *dest != "" {
|
||||
query.Dst = client.Dataset(*dataset).Table(*dest)
|
||||
}
|
||||
|
||||
// Query data.
|
||||
job, err := query.Run(ctx)
|
||||
|
||||
if err != nil {
|
||||
log.Fatalf("Querying: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Submitted query. Job ID: %s\n", job.ID())
|
||||
if !*wait {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Waiting for job to complete.\n")
|
||||
|
||||
for range time.Tick(*pollint) {
|
||||
status, err := job.Status(ctx)
|
||||
if err != nil {
|
||||
fmt.Printf("Failure determining status: %v", err)
|
||||
break
|
||||
}
|
||||
if !status.Done() {
|
||||
continue
|
||||
}
|
||||
if err := status.Err(); err == nil {
|
||||
fmt.Printf("Success\n")
|
||||
} else {
|
||||
fmt.Printf("Failure: %+v\n", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
142 vendor/cloud.google.com/go/examples/bigquery/read/main.go generated vendored
|
@@ -1,142 +0,0 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// read is an example client of the bigquery client library.
|
||||
// It reads from a table, returning the data via an Iterator.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"cloud.google.com/go/bigquery"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
var (
|
||||
project = flag.String("project", "", "The ID of a Google Cloud Platform project")
|
||||
dataset = flag.String("dataset", "", "The ID of a BigQuery dataset")
|
||||
table = flag.String("table", ".*", "A regular expression to match the IDs of tables to read.")
|
||||
jobID = flag.String("jobid", "", "The ID of a query job that has already been submitted."+
|
||||
" If set, --dataset, --table will be ignored, and results will be read from the specified job.")
|
||||
)
|
||||
|
||||
func printValues(ctx context.Context, it *bigquery.RowIterator) {
|
||||
// one-space padding.
|
||||
tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
|
||||
|
||||
for {
|
||||
var vals []bigquery.Value
|
||||
err := it.Next(&vals)
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("err calling get: %v\n", err)
|
||||
}
|
||||
sep := ""
|
||||
for _, v := range vals {
|
||||
fmt.Fprintf(tw, "%s%v", sep, v)
|
||||
sep = "\t"
|
||||
}
|
||||
fmt.Fprintf(tw, "\n")
|
||||
}
|
||||
tw.Flush()
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func printTable(ctx context.Context, client *bigquery.Client, t *bigquery.Table) {
|
||||
it := t.Read(ctx)
|
||||
id := t.FullyQualifiedName()
|
||||
fmt.Printf("%s\n%s\n", id, strings.Repeat("-", len(id)))
|
||||
printValues(ctx, it)
|
||||
}
|
||||
|
||||
func printQueryResults(ctx context.Context, client *bigquery.Client, queryJobID string) {
|
||||
job, err := client.JobFromID(ctx, queryJobID)
|
||||
if err != nil {
|
||||
log.Fatalf("Loading job: %v", err)
|
||||
}
|
||||
|
||||
it, err := job.Read(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Reading: %v", err)
|
||||
}
|
||||
|
||||
// TODO: print schema.
|
||||
printValues(ctx, it)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
flagsOk := true
|
||||
if flag.Lookup("project").Value.String() == "" {
|
||||
fmt.Fprintf(os.Stderr, "Flag --project is required\n")
|
||||
flagsOk = false
|
||||
}
|
||||
|
||||
var sourceFlagCount int
|
||||
if flag.Lookup("dataset").Value.String() != "" {
|
||||
sourceFlagCount++
|
||||
}
|
||||
if flag.Lookup("jobid").Value.String() != "" {
|
||||
sourceFlagCount++
|
||||
}
|
||||
if sourceFlagCount != 1 {
|
||||
fmt.Fprintf(os.Stderr, "Exactly one of --dataset or --jobid must be set\n")
|
||||
flagsOk = false
|
||||
}
|
||||
|
||||
if !flagsOk {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tableRE, err := regexp.Compile(*table)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "--table is not a valid regular expression: %q\n", *table)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
client, err := bigquery.NewClient(ctx, *project)
|
||||
if err != nil {
|
||||
log.Fatalf("Creating bigquery client: %v", err)
|
||||
}
|
||||
|
||||
if *jobID != "" {
|
||||
printQueryResults(ctx, client, *jobID)
|
||||
return
|
||||
}
|
||||
ds := client.Dataset(*dataset)
|
||||
tableIter := ds.Tables(context.Background())
|
||||
for {
|
||||
t, err := tableIter.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Listing tables: %v", err)
|
||||
}
|
||||
if tableRE.MatchString(t.TableID) {
|
||||
printTable(ctx, client, t)
|
||||
}
|
||||
}
|
||||
}
|
46 vendor/cloud.google.com/go/examples/bigtable/helloworld/README.md generated vendored
|
@@ -1,46 +0,0 @@
|
|||
# Cloud Bigtable Hello World in Go
|
||||
|
||||
This is a simple application that demonstrates using the [Google Cloud APIs Go
|
||||
Client Library](https://github.com/GoogleCloudPlatform/google-cloud-go) to connect
|
||||
to and interact with Cloud Bigtable.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Set up Cloud Console.
|
||||
1. Go to the [Cloud Console](https://cloud.google.com/console) and create or select your project.
|
||||
You will need the project ID later.
|
||||
1. Go to **Settings > Project Billing Settings** and enable billing.
|
||||
1. Select **APIs & Auth > APIs**.
|
||||
1. Enable the **Cloud Bigtable API** and the **Cloud Bigtable Admin API**.
|
||||
(You may need to search for the API).
|
||||
1. Set up gcloud.
|
||||
1. `gcloud components update`
|
||||
1. `gcloud auth login`
|
||||
1. `gcloud config set project PROJECT_ID`
|
||||
1. Provision a Cloud Bigtable instance
|
||||
1. Follow the instructions in the [user
|
||||
documentation](https://cloud.google.com/bigtable/docs/creating-instance) to
|
||||
create a Google Cloud Platform project and Cloud Bigtable instance if necessary.
|
||||
1. You'll need to reference your project id and instance id to run the application.
|
||||
|
||||
## Running
|
||||
|
||||
1. From the hello_world example folder, `go run main.go -project PROJECT_ID -instance INSTANCE_ID`, substituting your project id and instance id.
|
||||
|
||||
## Cleaning up
|
||||
|
||||
To avoid incurring extra charges to your Google Cloud Platform account, remove
|
||||
the resources created for this sample.
|
||||
|
||||
1. Go to the Clusters page in the [Cloud
|
||||
Console](https://console.cloud.google.com).
|
||||
|
||||
[Go to the Clusters page](https://console.cloud.google.com/project/_/bigtable/clusters)
|
||||
|
||||
1. Click the cluster name.
|
||||
|
||||
1. Click **Delete**.
|
||||
|
||||

|
||||
|
||||
1. Type the cluster ID, then click **Delete** to delete the cluster.
|
157 vendor/cloud.google.com/go/examples/bigtable/helloworld/main.go generated vendored
|
@@ -1,157 +0,0 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Hello world is a sample program demonstrating use of the Bigtable client
|
||||
// library to perform basic CRUD operations
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"cloud.google.com/go/bigtable"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// User-provided constants.
|
||||
const (
|
||||
tableName = "Hello-Bigtable"
|
||||
columnFamilyName = "cf1"
|
||||
columnName = "greeting"
|
||||
)
|
||||
|
||||
var greetings = []string{"Hello World!", "Hello Cloud Bigtable!", "Hello golang!"}
|
||||
|
||||
// sliceContains reports whether the provided string is present in the given slice of strings.
|
||||
func sliceContains(list []string, target string) bool {
|
||||
for _, s := range list {
|
||||
if s == target {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func main() {
|
||||
project := flag.String("project", "", "The Google Cloud Platform project ID. Required.")
|
||||
instance := flag.String("instance", "", "The Google Cloud Bigtable instance ID. Required.")
|
||||
flag.Parse()
|
||||
|
||||
for _, f := range []string{"project", "instance"} {
|
||||
if flag.Lookup(f).Value.String() == "" {
|
||||
log.Fatalf("The %s flag is required.", f)
|
||||
}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Set up admin client, tables, and column families.
|
||||
// NewAdminClient uses Application Default Credentials to authenticate.
|
||||
adminClient, err := bigtable.NewAdminClient(ctx, *project, *instance)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create admin client: %v", err)
|
||||
}
|
||||
|
||||
tables, err := adminClient.Tables(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch table list: %v", err)
|
||||
}
|
||||
|
||||
if !sliceContains(tables, tableName) {
|
||||
log.Printf("Creating table %s", tableName)
|
||||
if err := adminClient.CreateTable(ctx, tableName); err != nil {
|
||||
log.Fatalf("Could not create table %s: %v", tableName, err)
|
||||
}
|
||||
}
|
||||
|
||||
tblInfo, err := adminClient.TableInfo(ctx, tableName)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not read info for table %s: %v", tableName, err)
|
||||
}
|
||||
|
||||
if !sliceContains(tblInfo.Families, columnFamilyName) {
|
||||
if err := adminClient.CreateColumnFamily(ctx, tableName, columnFamilyName); err != nil {
|
||||
log.Fatalf("Could not create column family %s: %v", columnFamilyName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set up Bigtable data operations client.
|
||||
// NewClient uses Application Default Credentials to authenticate.
|
||||
client, err := bigtable.NewClient(ctx, *project, *instance)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create data operations client: %v", err)
|
||||
}
|
||||
|
||||
tbl := client.Open(tableName)
|
||||
muts := make([]*bigtable.Mutation, len(greetings))
|
||||
rowKeys := make([]string, len(greetings))
|
||||
|
||||
log.Printf("Writing greeting rows to table")
|
||||
for i, greeting := range greetings {
|
||||
muts[i] = bigtable.NewMutation()
|
||||
muts[i].Set(columnFamilyName, columnName, bigtable.Now(), []byte(greeting))
|
||||
|
||||
// Each row has a unique row key.
|
||||
//
|
||||
// Note: This example uses sequential numeric IDs for simplicity, but
|
||||
// this can result in poor performance in a production application.
|
||||
// Since rows are stored in sorted order by key, sequential keys can
|
||||
// result in poor distribution of operations across nodes.
|
||||
//
|
||||
// For more information about how to design a Bigtable schema for the
|
||||
// best performance, see the documentation:
|
||||
//
|
||||
// https://cloud.google.com/bigtable/docs/schema-design
|
||||
rowKeys[i] = fmt.Sprintf("%s%d", columnName, i)
|
||||
}
|
||||
|
||||
rowErrs, err := tbl.ApplyBulk(ctx, rowKeys, muts)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not apply bulk row mutation: %v", err)
|
||||
}
|
||||
if rowErrs != nil {
|
||||
for _, rowErr := range rowErrs {
|
||||
log.Printf("Error writing row: %v", rowErr)
|
||||
}
|
||||
log.Fatalf("Could not write some rows")
|
||||
}
|
||||
|
||||
log.Printf("Getting a single greeting by row key:")
|
||||
row, err := tbl.ReadRow(ctx, rowKeys[0], bigtable.RowFilter(bigtable.ColumnFilter(columnName)))
|
||||
if err != nil {
|
||||
log.Fatalf("Could not read row with key %s: %v", rowKeys[0], err)
|
||||
}
|
||||
log.Printf("\t%s = %s\n", rowKeys[0], string(row[columnFamilyName][0].Value))
|
||||
|
||||
log.Printf("Reading all greeting rows:")
|
||||
err = tbl.ReadRows(ctx, bigtable.PrefixRange(columnName), func(row bigtable.Row) bool {
|
||||
item := row[columnFamilyName][0]
|
||||
log.Printf("\t%s = %s\n", item.Row, string(item.Value))
|
||||
return true
|
||||
}, bigtable.RowFilter(bigtable.ColumnFilter(columnName)))
|
||||
|
||||
if err = client.Close(); err != nil {
|
||||
log.Fatalf("Could not close data operations client: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("Deleting the table")
|
||||
if err = adminClient.DeleteTable(ctx, tableName); err != nil {
|
||||
log.Fatalf("Could not delete table %s: %v", tableName, err)
|
||||
}
|
||||
|
||||
if err = adminClient.Close(); err != nil {
|
||||
log.Fatalf("Could not close admin client: %v", err)
|
||||
}
|
||||
}
|
453 vendor/cloud.google.com/go/examples/bigtable/search/search.go generated vendored
|
@@ -1,453 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Search is a sample web server that uses Cloud Bigtable as the storage layer
|
||||
// for a simple document-storage and full-text-search service.
|
||||
// It has four functions:
|
||||
// - Initialize and clear the table.
|
||||
// - Add a document. This adds the content of a user-supplied document to the
|
||||
// Bigtable, and adds references to the document to an index in the Bigtable.
|
||||
// The document is indexed under each unique word in the document.
|
||||
// - Search the index. This returns documents containing each word in a user
|
||||
// query, with snippets and links to view the whole document.
|
||||
// - Copy table. This copies the documents and index from another table and
|
||||
// adds them to the current one.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"cloud.google.com/go/bigtable"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
addTemplate = template.Must(template.New("").Parse(`<html><body>
|
||||
Added {{.Title}}
|
||||
</body></html>`))
|
||||
|
||||
contentTemplate = template.Must(template.New("").Parse(`<html><body>
|
||||
<b>{{.Title}}</b><br><br>
|
||||
{{.Content}}
|
||||
</body></html>`))
|
||||
|
||||
searchTemplate = template.Must(template.New("").Parse(`<html><body>
|
||||
Results for <b>{{.Query}}</b>:<br><br>
|
||||
{{range .Results}}
|
||||
<a href="/content?name={{.Title}}">{{.Title}}</a><br>
|
||||
<i>{{.Snippet}}</i><br><br>
|
||||
{{end}}
|
||||
</body></html>`))
|
||||
)
|
||||
|
||||
const (
|
||||
indexColumnFamily = "i"
|
||||
contentColumnFamily = "c"
|
||||
mainPage = `
|
||||
<html>
|
||||
<head>
|
||||
<title>Document Search</title>
|
||||
</head>
|
||||
<body>
|
||||
Initialize and clear table:
|
||||
<form action="/reset" method="post">
|
||||
<div><input type="submit" value="Init"></div>
|
||||
</form>
|
||||
|
||||
Search for documents:
|
||||
<form action="/search" method="post">
|
||||
<div><input type="text" name="q" size=80></div>
|
||||
<div><input type="submit" value="Search"></div>
|
||||
</form>
|
||||
|
||||
Add a document:
|
||||
<form action="/add" method="post">
|
||||
Document name:
|
||||
<div><textarea name="name" rows="1" cols="80"></textarea></div>
|
||||
Document text:
|
||||
<div><textarea name="content" rows="20" cols="80"></textarea></div>
|
||||
<div><input type="submit" value="Submit"></div>
|
||||
</form>
|
||||
|
||||
Copy data from another table:
|
||||
<form action="/copy" method="post">
|
||||
Source table name:
|
||||
<div><input type="text" name="name" size=80></div>
|
||||
<div><input type="submit" value="Copy"></div>
|
||||
</form>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
project = flag.String("project", "", "The name of the project.")
|
||||
instance = flag.String("instance", "", "The name of the Cloud Bigtable instance.")
|
||||
tableName = flag.String("table", "docindex", "The name of the table containing the documents and index.")
|
||||
port = flag.Int("port", 8080, "TCP port for server.")
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
// Make an admin client.
|
||||
adminClient, err := bigtable.NewAdminClient(context.Background(), *project, *instance)
|
||||
if err != nil {
|
||||
log.Fatal("Bigtable NewAdminClient:", err)
|
||||
}
|
||||
|
||||
// Make a regular client.
|
||||
client, err := bigtable.NewClient(context.Background(), *project, *instance)
|
||||
if err != nil {
|
||||
log.Fatal("Bigtable NewClient:", err)
|
||||
}
|
||||
|
||||
// Open the table.
|
||||
table := client.Open(*tableName)
|
||||
|
||||
// Set up HTML handlers, and start the web server.
|
||||
http.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) { handleSearch(w, r, table) })
|
||||
http.HandleFunc("/content", func(w http.ResponseWriter, r *http.Request) { handleContent(w, r, table) })
|
||||
http.HandleFunc("/add", func(w http.ResponseWriter, r *http.Request) { handleAddDoc(w, r, table) })
|
||||
http.HandleFunc("/reset", func(w http.ResponseWriter, r *http.Request) { handleReset(w, r, *tableName, adminClient) })
|
||||
http.HandleFunc("/copy", func(w http.ResponseWriter, r *http.Request) { handleCopy(w, r, *tableName, client, adminClient) })
|
||||
http.HandleFunc("/", handleMain)
|
||||
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(*port), nil))
|
||||
}
|
||||
|
||||
// handleMain outputs the home page.
|
||||
func handleMain(w http.ResponseWriter, r *http.Request) {
|
||||
io.WriteString(w, mainPage)
|
||||
}
|
||||
|
||||
// tokenize splits a string into tokens.
|
||||
// This is very simple, it's not a good tokenization function.
|
||||
func tokenize(s string) []string {
|
||||
wordMap := make(map[string]bool)
|
||||
f := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) })
|
||||
for _, word := range f {
|
||||
word = strings.ToLower(word)
|
||||
wordMap[word] = true
|
||||
}
|
||||
words := make([]string, 0, len(wordMap))
|
||||
for word := range wordMap {
|
||||
words = append(words, word)
|
||||
}
|
||||
return words
|
||||
}
|
||||
|
||||
// handleContent fetches the content of a document from the Bigtable and returns it.
|
||||
func handleContent(w http.ResponseWriter, r *http.Request, table *bigtable.Table) {
|
||||
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
name := r.FormValue("name")
|
||||
if len(name) == 0 {
|
||||
http.Error(w, "No document name supplied.", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
row, err := table.ReadRow(ctx, name)
|
||||
if err != nil {
|
||||
http.Error(w, "Error reading content: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
content := row[contentColumnFamily]
|
||||
if len(content) == 0 {
|
||||
http.Error(w, "Document not found.", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := contentTemplate.ExecuteTemplate(&buf, "", struct{ Title, Content string }{name, string(content[0].Value)}); err != nil {
|
||||
http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
io.Copy(w, &buf)
|
||||
}
|
||||
|
||||
// handleSearch responds to search queries, returning links and snippets for matching documents.
|
||||
func handleSearch(w http.ResponseWriter, r *http.Request, table *bigtable.Table) {
|
||||
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
query := r.FormValue("q")
|
||||
// Split the query into words.
|
||||
words := tokenize(query)
|
||||
if len(words) == 0 {
|
||||
http.Error(w, "Empty query.", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// readRows reads from many rows concurrently.
|
||||
readRows := func(rows []string) ([]bigtable.Row, error) {
|
||||
results := make([]bigtable.Row, len(rows))
|
||||
errors := make([]error, len(rows))
|
||||
var wg sync.WaitGroup
|
||||
for i, row := range rows {
|
||||
wg.Add(1)
|
||||
go func(i int, row string) {
|
||||
defer wg.Done()
|
||||
results[i], errors[i] = table.ReadRow(ctx, row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
|
||||
}(i, row)
|
||||
}
|
||||
wg.Wait()
|
||||
for _, err := range errors {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// For each query word, get the list of documents containing it.
|
||||
results, err := readRows(words)
|
||||
if err != nil {
|
||||
http.Error(w, "Error reading index: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Count how many of the query words each result contained.
|
||||
hits := make(map[string]int)
|
||||
for _, r := range results {
|
||||
for _, r := range r[indexColumnFamily] {
|
||||
hits[r.Column]++
|
||||
}
|
||||
}
|
||||
|
||||
// Build a slice of all the documents that matched every query word.
|
||||
var matches []string
|
||||
for doc, count := range hits {
|
||||
if count == len(words) {
|
||||
matches = append(matches, doc[len(indexColumnFamily+":"):])
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch the content of those documents from the Bigtable.
|
||||
content, err := readRows(matches)
|
||||
if err != nil {
|
||||
http.Error(w, "Error reading results: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
type result struct{ Title, Snippet string }
|
||||
data := struct {
|
||||
Query string
|
||||
Results []result
|
||||
}{query, nil}
|
||||
|
||||
// Output links and snippets.
|
||||
for i, doc := range matches {
|
||||
var text string
|
||||
c := content[i][contentColumnFamily]
|
||||
if len(c) > 0 {
|
||||
text = string(c[0].Value)
|
||||
}
|
||||
if len(text) > 100 {
|
||||
text = text[:100] + "..."
|
||||
}
|
||||
data.Results = append(data.Results, result{doc, text})
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := searchTemplate.ExecuteTemplate(&buf, "", data); err != nil {
|
||||
http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
io.Copy(w, &buf)
|
||||
}
|
||||
|
||||
// handleAddDoc adds a document to the index.
|
||||
func handleAddDoc(w http.ResponseWriter, r *http.Request, table *bigtable.Table) {
|
||||
if r.Method != "POST" {
|
||||
http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Minute)
|
||||
|
||||
name := r.FormValue("name")
|
||||
if len(name) == 0 {
|
||||
http.Error(w, "Empty document name!", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
content := r.FormValue("content")
|
||||
if len(content) == 0 {
|
||||
http.Error(w, "Empty document content!", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
writeErr error // Set if any write fails.
|
||||
mu sync.Mutex // Protects writeErr
|
||||
wg sync.WaitGroup // Used to wait for all writes to finish.
|
||||
)
|
||||
|
||||
// writeOneColumn writes one column in one row, updates err if there is an error,
|
||||
// and signals wg that one operation has finished.
|
||||
writeOneColumn := func(row, family, column, value string, ts bigtable.Timestamp) {
|
||||
mut := bigtable.NewMutation()
|
||||
mut.Set(family, column, ts, []byte(value))
|
||||
err := table.Apply(ctx, row, mut)
|
||||
if err != nil {
|
||||
mu.Lock()
|
||||
writeErr = err
|
||||
mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Start a write to store the document content.
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
writeOneColumn(name, contentColumnFamily, "", content, bigtable.Now())
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// Start writes to store the document name in the index for each word in the document.
|
||||
words := tokenize(content)
|
||||
for _, word := range words {
|
||||
var (
|
||||
row = word
|
||||
family = indexColumnFamily
|
||||
column = name
|
||||
value = ""
|
||||
ts = bigtable.Now()
|
||||
)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
// TODO: should use a semaphore to limit the number of concurrent writes.
|
||||
writeOneColumn(row, family, column, value, ts)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
if writeErr != nil {
|
||||
http.Error(w, "Error writing to Bigtable: "+writeErr.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := addTemplate.ExecuteTemplate(&buf, "", struct{ Title string }{name}); err != nil {
|
||||
http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
io.Copy(w, &buf)
|
||||
}
|
||||
|
||||
// handleReset deletes the table if it exists, creates it again, and creates its column families.
|
||||
func handleReset(w http.ResponseWriter, r *http.Request, table string, adminClient *bigtable.AdminClient) {
|
||||
if r.Method != "POST" {
|
||||
http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
adminClient.DeleteTable(ctx, table)
|
||||
if err := adminClient.CreateTable(ctx, table); err != nil {
|
||||
http.Error(w, "Error creating Bigtable: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
time.Sleep(20 * time.Second)
|
||||
// Create two column families, and set the GC policy for each one to keep one version.
|
||||
for _, family := range []string{indexColumnFamily, contentColumnFamily} {
|
||||
if err := adminClient.CreateColumnFamily(ctx, table, family); err != nil {
|
||||
http.Error(w, "Error creating column family: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err := adminClient.SetGCPolicy(ctx, table, family, bigtable.MaxVersionsPolicy(1)); err != nil {
|
||||
http.Error(w, "Error setting GC policy: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
w.Write([]byte("<html><body>Done.</body></html>"))
|
||||
return
|
||||
}
|
||||
|
||||
// copyTable copies data from one table to another.
|
||||
func copyTable(src, dst string, client *bigtable.Client, adminClient *bigtable.AdminClient) error {
|
||||
if src == "" || src == dst {
|
||||
return nil
|
||||
}
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Minute)
|
||||
|
||||
// Open the source and destination tables.
|
||||
srcTable := client.Open(src)
|
||||
dstTable := client.Open(dst)
|
||||
|
||||
var (
|
||||
writeErr error // Set if any write fails.
|
||||
mu sync.Mutex // Protects writeErr
|
||||
wg sync.WaitGroup // Used to wait for all writes to finish.
|
||||
)
|
||||
copyRowToTable := func(row bigtable.Row) bool {
|
||||
mu.Lock()
|
||||
failed := writeErr != nil
|
||||
mu.Unlock()
|
||||
if failed {
|
||||
return false
|
||||
}
|
||||
mut := bigtable.NewMutation()
|
||||
for family, items := range row {
|
||||
for _, item := range items {
|
||||
// Get the column name, excluding the column family name and ':' character.
|
||||
columnWithoutFamily := item.Column[len(family)+1:]
|
||||
mut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value)
|
||||
}
|
||||
}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
// TODO: should use a semaphore to limit the number of concurrent writes.
|
||||
if err := dstTable.Apply(ctx, row.Key(), mut); err != nil {
|
||||
mu.Lock()
|
||||
writeErr = err
|
||||
mu.Unlock()
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
return true
|
||||
}
|
||||
|
||||
// Create a filter that only accepts the column families we're interested in.
|
||||
filter := bigtable.FamilyFilter(indexColumnFamily + "|" + contentColumnFamily)
|
||||
// Read every row from srcTable, and call copyRowToTable to copy it to our table.
|
||||
err := srcTable.ReadRows(ctx, bigtable.InfiniteRange(""), copyRowToTable, bigtable.RowFilter(filter))
|
||||
wg.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeErr
|
||||
}
|
||||
|
||||
// handleCopy copies data from one table to another.
|
||||
func handleCopy(w http.ResponseWriter, r *http.Request, dst string, client *bigtable.Client, adminClient *bigtable.AdminClient) {
|
||||
if r.Method != "POST" {
|
||||
http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
src := r.FormValue("name")
|
||||
if src == "" {
|
||||
http.Error(w, "No source table specified.", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if err := copyTable(src, dst, client, adminClient); err != nil {
|
||||
http.Error(w, "Failed to rebuild index: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
fmt.Fprint(w, "Copied table.\n")
|
||||
}
|
29 vendor/cloud.google.com/go/examples/bigtable/usercounter/README.md generated vendored
|
@@ -1,29 +0,0 @@
|
|||
# User Counter
|
||||
# (Cloud Bigtable on Managed VMs using Go)
|
||||
|
||||
This app counts how often each user visits. The app uses Cloud Bigtable to store the visit counts for each user.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Set up Cloud Console.
|
||||
1. Go to the [Cloud Console](https://cloud.google.com/console) and create or select your project.
|
||||
You will need the project ID later.
|
||||
1. Go to **Settings > Project Billing Settings** and enable billing.
|
||||
1. Select **APIs & Auth > APIs**.
|
||||
1. Enable the **Cloud Bigtable API** and the **Cloud Bigtable Admin API**.
|
||||
(You may need to search for the API).
|
||||
1. Set up gcloud.
|
||||
1. `gcloud components update`
|
||||
1. `gcloud auth login`
|
||||
1. `gcloud config set project PROJECT_ID`
|
||||
1. Download App Engine SDK for Go.
|
||||
1. `go get -u google.golang.org/appengine/...`
|
||||
1. In main.go, change the `project` and `instance` constants.
|
||||
|
||||
## Running locally
|
||||
|
||||
1. From the sample project folder, `dev_appserver.py app.yaml`.
|
||||
|
||||
## Deploying on Google App Engine flexible environment
|
||||
|
||||
Follow the [deployment instructions](https://cloud.google.com/appengine/docs/flexible/go/testing-and-deploying-your-app).
|
11 vendor/cloud.google.com/go/examples/bigtable/usercounter/app.yaml generated vendored
|
@@ -1,11 +0,0 @@
|
|||
runtime: go
|
||||
api_version: go1
|
||||
vm: true
|
||||
|
||||
manual_scaling:
|
||||
instances: 1
|
||||
|
||||
handlers:
|
||||
# Serve only the web root.
|
||||
- url: /
|
||||
script: _go_app
|
180 vendor/cloud.google.com/go/examples/bigtable/usercounter/main.go generated vendored
|
@@ -1,180 +0,0 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
User counter is a program that tracks how often a user has visited the index page.
|
||||
|
||||
This program demonstrates usage of the Cloud Bigtable API for App Engine flexible environment and Go.
|
||||
Instructions for running this program are in the README.md.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"html/template"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"cloud.google.com/go/bigtable"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/appengine"
|
||||
aelog "google.golang.org/appengine/log"
|
||||
"google.golang.org/appengine/user"
|
||||
)
|
||||
|
||||
// User-provided constants.
|
||||
const (
|
||||
project = "PROJECT_ID"
|
||||
instance = "INSTANCE"
|
||||
)
|
||||
|
||||
var (
|
||||
tableName = "user-visit-counter"
|
||||
familyName = "emails"
|
||||
|
||||
// Client is initialized by main.
|
||||
client *bigtable.Client
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
|
||||
// Set up admin client, tables, and column families.
|
||||
// NewAdminClient uses Application Default Credentials to authenticate.
|
||||
adminClient, err := bigtable.NewAdminClient(ctx, project, instance)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to create a table admin client. %v", err)
|
||||
}
|
||||
tables, err := adminClient.Tables(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to fetch table list. %v", err)
|
||||
}
|
||||
if !sliceContains(tables, tableName) {
|
||||
if err := adminClient.CreateTable(ctx, tableName); err != nil {
|
||||
log.Fatalf("Unable to create table: %v. %v", tableName, err)
|
||||
}
|
||||
}
|
||||
tblInfo, err := adminClient.TableInfo(ctx, tableName)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to read info for table: %v. %v", tableName, err)
|
||||
}
|
||||
if !sliceContains(tblInfo.Families, familyName) {
|
||||
if err := adminClient.CreateColumnFamily(ctx, tableName, familyName); err != nil {
|
||||
log.Fatalf("Unable to create column family: %v. %v", familyName, err)
|
||||
}
|
||||
}
|
||||
adminClient.Close()
|
||||
|
||||
// Set up Bigtable data operations client.
|
||||
// NewClient uses Application Default Credentials to authenticate.
|
||||
client, err = bigtable.NewClient(ctx, project, instance)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to create data operations client. %v", err)
|
||||
}
|
||||
|
||||
http.Handle("/", appHandler(mainHandler))
|
||||
appengine.Main() // Never returns.
|
||||
}
|
||||
|
||||
// mainHandler tracks how many times each user has visited this page.
|
||||
func mainHandler(w http.ResponseWriter, r *http.Request) *appError {
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx := appengine.NewContext(r)
|
||||
u := user.Current(ctx)
|
||||
if u == nil {
|
||||
login, err := user.LoginURL(ctx, r.URL.String())
|
||||
if err != nil {
|
||||
return &appError{err, "Error finding login URL", http.StatusInternalServerError}
|
||||
}
|
||||
http.Redirect(w, r, login, http.StatusFound)
|
||||
return nil
|
||||
}
|
||||
logoutURL, err := user.LogoutURL(ctx, "/")
|
||||
if err != nil {
|
||||
return &appError{err, "Error finding logout URL", http.StatusInternalServerError}
|
||||
}
|
||||
|
||||
// Increment visit count for user.
|
||||
tbl := client.Open(tableName)
|
||||
rmw := bigtable.NewReadModifyWrite()
|
||||
rmw.Increment(familyName, u.Email, 1)
|
||||
row, err := tbl.ApplyReadModifyWrite(ctx, u.Email, rmw)
|
||||
if err != nil {
|
||||
return &appError{err, "Error applying ReadModifyWrite to row: " + u.Email, http.StatusInternalServerError}
|
||||
}
|
||||
data := struct {
|
||||
Username, Logout string
|
||||
Visits uint64
|
||||
}{
|
||||
Username: u.Email,
|
||||
// Retrieve the most recently edited column.
|
||||
Visits: binary.BigEndian.Uint64(row[familyName][0].Value),
|
||||
Logout: logoutURL,
|
||||
}
|
||||
|
||||
// Display hello page.
|
||||
var buf bytes.Buffer
|
||||
if err := tmpl.Execute(&buf, data); err != nil {
|
||||
return &appError{err, "Error writing template", http.StatusInternalServerError}
|
||||
}
|
||||
buf.WriteTo(w)
|
||||
return nil
|
||||
}
|
||||
|
||||
var tmpl = template.Must(template.New("").Parse(`
|
||||
<html><body>
|
||||
|
||||
<p>
|
||||
{{with .Username}} Hello {{.}}{{end}}
|
||||
{{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
|
||||
|
||||
</p>
|
||||
|
||||
<p>
|
||||
You have visited {{.Visits}}
|
||||
</p>
|
||||
|
||||
</body></html>`))
|
||||
|
||||
// sliceContains reports whether the provided string is present in the given slice of strings.
|
||||
func sliceContains(list []string, target string) bool {
|
||||
for _, s := range list {
|
||||
if s == target {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// More info about this method of error handling can be found at: http://blog.golang.org/error-handling-and-go
|
||||
type appHandler func(http.ResponseWriter, *http.Request) *appError
|
||||
|
||||
type appError struct {
|
||||
Error error
|
||||
Message string
|
||||
Code int
|
||||
}
|
||||
|
||||
func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if e := fn(w, r); e != nil {
|
||||
ctx := appengine.NewContext(r)
|
||||
aelog.Errorf(ctx, "%v", e.Error)
|
||||
http.Error(w, e.Message, e.Code)
|
||||
}
|
||||
}
|
428
vendor/cloud.google.com/go/examples/storage/appengine/app.go
generated
vendored
@@ -1,428 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//[START sample]
|
||||
// Package gcsdemo is an example App Engine app using the Google Cloud Storage API.
|
||||
package gcsdemo
|
||||
|
||||
//[START imports]
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/appengine"
|
||||
"google.golang.org/appengine/file"
|
||||
"google.golang.org/appengine/log"
|
||||
)
|
||||
|
||||
//[END imports]
|
||||
|
||||
func init() {
|
||||
http.HandleFunc("/", handler)
|
||||
}
|
||||
|
||||
// demo struct holds information needed to run the various demo functions.
|
||||
type demo struct {
|
||||
client *storage.Client
|
||||
bucketName string
|
||||
bucket *storage.BucketHandle
|
||||
|
||||
w io.Writer
|
||||
ctx context.Context
|
||||
// cleanUp is a list of filenames that need cleaning up at the end of the demo.
|
||||
cleanUp []string
|
||||
// failed indicates that one or more of the demo steps failed.
|
||||
failed bool
|
||||
}
|
||||
|
||||
func (d *demo) errorf(format string, args ...interface{}) {
|
||||
d.failed = true
|
||||
fmt.Fprintln(d.w, fmt.Sprintf(format, args...))
|
||||
log.Errorf(d.ctx, format, args...)
|
||||
}
|
||||
|
||||
// handler is the main demo entry point that calls the GCS operations.
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := appengine.NewContext(r)
|
||||
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
//[START get_default_bucket]
|
||||
// Use `dev_appserver.py --default_gcs_bucket_name GCS_BUCKET_NAME`
|
||||
// when running locally.
|
||||
bucket, err := file.DefaultBucketName(ctx)
|
||||
if err != nil {
|
||||
log.Errorf(ctx, "failed to get default GCS bucket name: %v", err)
|
||||
}
|
||||
//[END get_default_bucket]
|
||||
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
log.Errorf(ctx, "failed to create client: %v", err)
|
||||
return
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(ctx))
|
||||
fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket)
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
d := &demo{
|
||||
w: buf,
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
bucket: client.Bucket(bucket),
|
||||
bucketName: bucket,
|
||||
}
|
||||
|
||||
n := "demo-testfile-go"
|
||||
d.createFile(n)
|
||||
d.readFile(n)
|
||||
d.copyFile(n)
|
||||
d.statFile(n)
|
||||
d.createListFiles()
|
||||
d.listBucket()
|
||||
d.listBucketDirMode()
|
||||
d.defaultACL()
|
||||
d.putDefaultACLRule()
|
||||
d.deleteDefaultACLRule()
|
||||
d.bucketACL()
|
||||
d.putBucketACLRule()
|
||||
d.deleteBucketACLRule()
|
||||
d.acl(n)
|
||||
d.putACLRule(n)
|
||||
d.deleteACLRule(n)
|
||||
d.deleteFiles()
|
||||
|
||||
if d.failed {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
buf.WriteTo(w)
|
||||
fmt.Fprintf(w, "\nDemo failed.\n")
|
||||
} else {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
buf.WriteTo(w)
|
||||
fmt.Fprintf(w, "\nDemo succeeded.\n")
|
||||
}
|
||||
}
|
||||
|
||||
//[START write]
|
||||
// createFile creates a file in Google Cloud Storage.
|
||||
func (d *demo) createFile(fileName string) {
|
||||
fmt.Fprintf(d.w, "Creating file /%v/%v\n", d.bucketName, fileName)
|
||||
|
||||
wc := d.bucket.Object(fileName).NewWriter(d.ctx)
|
||||
wc.ContentType = "text/plain"
|
||||
wc.Metadata = map[string]string{
|
||||
"x-goog-meta-foo": "foo",
|
||||
"x-goog-meta-bar": "bar",
|
||||
}
|
||||
d.cleanUp = append(d.cleanUp, fileName)
|
||||
|
||||
if _, err := wc.Write([]byte("abcde\n")); err != nil {
|
||||
d.errorf("createFile: unable to write data to bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil {
|
||||
d.errorf("createFile: unable to write data to bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
if err := wc.Close(); err != nil {
|
||||
d.errorf("createFile: unable to close bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
//[END write]
|
||||
|
||||
//[START read]
|
||||
// readFile reads the named file in Google Cloud Storage.
|
||||
func (d *demo) readFile(fileName string) {
|
||||
io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n")
|
||||
|
||||
rc, err := d.bucket.Object(fileName).NewReader(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("readFile: unable to open file from bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
defer rc.Close()
|
||||
slurp, err := ioutil.ReadAll(rc)
|
||||
if err != nil {
|
||||
d.errorf("readFile: unable to read data from bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0])
|
||||
if len(slurp) > 1024 {
|
||||
fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:])
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%s\n", slurp)
|
||||
}
|
||||
}
|
||||
|
||||
//[END read]
|
||||
|
||||
//[START copy]
|
||||
// copyFile copies a file in Google Cloud Storage.
|
||||
func (d *demo) copyFile(fileName string) {
|
||||
copyName := fileName + "-copy"
|
||||
fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", d.bucketName, fileName, d.bucketName, copyName)
|
||||
|
||||
obj, err := d.bucket.Object(copyName).CopierFrom(d.bucket.Object(fileName)).Run(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", d.bucketName, fileName, d.bucketName, copyName, err)
|
||||
return
|
||||
}
|
||||
d.cleanUp = append(d.cleanUp, copyName)
|
||||
|
||||
d.dumpStats(obj)
|
||||
}
|
||||
|
||||
//[END copy]
|
||||
|
||||
func (d *demo) dumpStats(obj *storage.ObjectAttrs) {
|
||||
fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name)
|
||||
fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType)
|
||||
fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL)
|
||||
fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner)
|
||||
fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding)
|
||||
fmt.Fprintf(d.w, "Size: %v, ", obj.Size)
|
||||
fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5)
|
||||
fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C)
|
||||
fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata)
|
||||
fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink)
|
||||
fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass)
|
||||
if !obj.Deleted.IsZero() {
|
||||
fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted)
|
||||
}
|
||||
fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated)
|
||||
}
|
||||
|
||||
//[START file_metadata]
|
||||
// statFile reads the stats of the named file in Google Cloud Storage.
|
||||
func (d *demo) statFile(fileName string) {
|
||||
io.WriteString(d.w, "\nFile stat:\n")
|
||||
|
||||
obj, err := d.bucket.Object(fileName).Attrs(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
|
||||
d.dumpStats(obj)
|
||||
}
|
||||
|
||||
//[END file_metadata]
|
||||
|
||||
// createListFiles creates files that will be used by listBucket.
|
||||
func (d *demo) createListFiles() {
|
||||
io.WriteString(d.w, "\nCreating more files for listbucket...\n")
|
||||
for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} {
|
||||
d.createFile(n)
|
||||
}
|
||||
}
|
||||
|
||||
//[START list_bucket]
|
||||
// listBucket lists the contents of a bucket in Google Cloud Storage.
|
||||
func (d *demo) listBucket() {
|
||||
io.WriteString(d.w, "\nListbucket result:\n")
|
||||
|
||||
query := &storage.Query{Prefix: "foo"}
|
||||
it := d.bucket.Objects(d.ctx, query)
|
||||
for {
|
||||
obj, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
d.errorf("listBucket: unable to list bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpStats(obj)
|
||||
}
|
||||
}
|
||||
|
||||
//[END list_bucket]
|
||||
|
||||
func (d *demo) listDir(name, indent string) {
|
||||
query := &storage.Query{Prefix: name, Delimiter: "/"}
|
||||
it := d.bucket.Objects(d.ctx, query)
|
||||
for {
|
||||
obj, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
d.errorf("listBucketDirMode: unable to list bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
if obj.Prefix == "" {
|
||||
fmt.Fprint(d.w, indent)
|
||||
d.dumpStats(obj)
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, d.bucketName, obj.Prefix)
|
||||
d.listDir(obj.Prefix, indent+" ")
|
||||
}
|
||||
}
|
||||
|
||||
// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage.
|
||||
func (d *demo) listBucketDirMode() {
|
||||
io.WriteString(d.w, "\nListbucket directory mode result:\n")
|
||||
d.listDir("b", "")
|
||||
}
|
||||
|
||||
// dumpDefaultACL prints out the default object ACL for this bucket.
|
||||
func (d *demo) dumpDefaultACL() {
|
||||
acl, err := d.bucket.ACL().List(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
for _, v := range acl {
|
||||
fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role)
|
||||
}
|
||||
}
|
||||
|
||||
// defaultACL displays the default object ACL for this bucket.
|
||||
func (d *demo) defaultACL() {
|
||||
io.WriteString(d.w, "\nDefault object ACL:\n")
|
||||
d.dumpDefaultACL()
|
||||
}
|
||||
|
||||
// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket.
|
||||
func (d *demo) putDefaultACLRule() {
|
||||
io.WriteString(d.w, "\nPut Default object ACL Rule:\n")
|
||||
err := d.bucket.DefaultObjectACL().Set(d.ctx, storage.AllUsers, storage.RoleReader)
|
||||
if err != nil {
|
||||
d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpDefaultACL()
|
||||
}
|
||||
|
||||
// deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket.
|
||||
func (d *demo) deleteDefaultACLRule() {
|
||||
io.WriteString(d.w, "\nDelete Default object ACL Rule:\n")
|
||||
err := d.bucket.DefaultObjectACL().Delete(d.ctx, storage.AllUsers)
|
||||
if err != nil {
|
||||
d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpDefaultACL()
|
||||
}
|
||||
|
||||
// dumpBucketACL prints out the bucket ACL.
|
||||
func (d *demo) dumpBucketACL() {
|
||||
acl, err := d.bucket.ACL().List(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
for _, v := range acl {
|
||||
fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role)
|
||||
}
|
||||
}
|
||||
|
||||
// bucketACL displays the bucket ACL for this bucket.
|
||||
func (d *demo) bucketACL() {
|
||||
io.WriteString(d.w, "\nBucket ACL:\n")
|
||||
d.dumpBucketACL()
|
||||
}
|
||||
|
||||
// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket.
|
||||
func (d *demo) putBucketACLRule() {
|
||||
io.WriteString(d.w, "\nPut Bucket ACL Rule:\n")
|
||||
err := d.bucket.ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader)
|
||||
if err != nil {
|
||||
d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpBucketACL()
|
||||
}
|
||||
|
||||
// deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket.
|
||||
func (d *demo) deleteBucketACLRule() {
|
||||
io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n")
|
||||
err := d.bucket.ACL().Delete(d.ctx, storage.AllUsers)
|
||||
if err != nil {
|
||||
d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpBucketACL()
|
||||
}
|
||||
|
||||
// dumpACL prints out the ACL of the named file.
|
||||
func (d *demo) dumpACL(fileName string) {
|
||||
acl, err := d.bucket.Object(fileName).ACL().List(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
for _, v := range acl {
|
||||
fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role)
|
||||
}
|
||||
}
|
||||
|
||||
// acl displays the ACL for the named file.
|
||||
func (d *demo) acl(fileName string) {
|
||||
fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName)
|
||||
d.dumpACL(fileName)
|
||||
}
|
||||
|
||||
// putACLRule adds the "allUsers" ACL rule for the named file.
|
||||
func (d *demo) putACLRule(fileName string) {
|
||||
fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName)
|
||||
err := d.bucket.Object(fileName).ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader)
|
||||
if err != nil {
|
||||
d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
d.dumpACL(fileName)
|
||||
}
|
||||
|
||||
// deleteACLRule deleted the "allUsers" ACL rule for the named file.
|
||||
func (d *demo) deleteACLRule(fileName string) {
|
||||
fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName)
|
||||
err := d.bucket.Object(fileName).ACL().Delete(d.ctx, storage.AllUsers)
|
||||
if err != nil {
|
||||
d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
d.dumpACL(fileName)
|
||||
}
|
||||
|
||||
// deleteFiles deletes all the temporary files from a bucket created by this demo.
|
||||
func (d *demo) deleteFiles() {
|
||||
io.WriteString(d.w, "\nDeleting files...\n")
|
||||
for _, v := range d.cleanUp {
|
||||
fmt.Fprintf(d.w, "Deleting file %v\n", v)
|
||||
if err := d.bucket.Object(v).Delete(d.ctx); err != nil {
|
||||
d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", d.bucketName, v, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//[END sample]
|
8
vendor/cloud.google.com/go/examples/storage/appengine/app.yaml
generated
vendored
@@ -1,8 +0,0 @@
application: your-app-id
version: v1
runtime: go
api_version: go1

handlers:
- url: /.*
  script: _go_app
432
vendor/cloud.google.com/go/examples/storage/appenginevm/app.go
generated
vendored
@@ -1,432 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package main is an example Mananged VM app using the Google Cloud Storage API.
|
||||
package main
|
||||
|
||||
//[START imports]
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/appengine"
|
||||
"google.golang.org/appengine/file"
|
||||
"google.golang.org/appengine/log"
|
||||
)
|
||||
|
||||
//[END imports]
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", handler)
|
||||
appengine.Main()
|
||||
}
|
||||
|
||||
//[START bucket_struct]
|
||||
// demo struct holds information needed to run the various demo functions.
|
||||
type demo struct {
|
||||
client *storage.Client
|
||||
bucketName string
|
||||
bucket *storage.BucketHandle
|
||||
|
||||
w io.Writer
|
||||
ctx context.Context
|
||||
// cleanUp is a list of filenames that need cleaning up at the end of the demo.
|
||||
cleanUp []string
|
||||
// failed indicates that one or more of the demo steps failed.
|
||||
failed bool
|
||||
}
|
||||
|
||||
//[END bucket_struct]
|
||||
|
||||
func (d *demo) errorf(format string, args ...interface{}) {
|
||||
d.failed = true
|
||||
fmt.Fprintln(d.w, fmt.Sprintf(format, args...))
|
||||
log.Errorf(d.ctx, format, args...)
|
||||
}
|
||||
|
||||
// handler is the main demo entry point that calls the GCS operations.
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := appengine.NewContext(r)
|
||||
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
//[START get_default_bucket]
|
||||
// Use `dev_appserver.py --default_gcs_bucket_name GCS_BUCKET_NAME`
|
||||
// when running locally.
|
||||
bucket, err := file.DefaultBucketName(ctx)
|
||||
if err != nil {
|
||||
log.Errorf(ctx, "failed to get default GCS bucket name: %v", err)
|
||||
}
|
||||
//[END get_default_bucket]
|
||||
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
log.Errorf(ctx, "failed to create client: %v", err)
|
||||
return
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(ctx))
|
||||
fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket)
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
d := &demo{
|
||||
w: buf,
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
bucket: client.Bucket(bucket),
|
||||
bucketName: bucket,
|
||||
}
|
||||
|
||||
n := "demo-testfile-go"
|
||||
d.createFile(n)
|
||||
d.readFile(n)
|
||||
d.copyFile(n)
|
||||
d.statFile(n)
|
||||
d.createListFiles()
|
||||
d.listBucket()
|
||||
d.listBucketDirMode()
|
||||
d.defaultACL()
|
||||
d.putDefaultACLRule()
|
||||
d.deleteDefaultACLRule()
|
||||
d.bucketACL()
|
||||
d.putBucketACLRule()
|
||||
d.deleteBucketACLRule()
|
||||
d.acl(n)
|
||||
d.putACLRule(n)
|
||||
d.deleteACLRule(n)
|
||||
d.deleteFiles()
|
||||
|
||||
if d.failed {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
buf.WriteTo(w)
|
||||
fmt.Fprintf(w, "\nDemo failed.\n")
|
||||
} else {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
buf.WriteTo(w)
|
||||
fmt.Fprintf(w, "\nDemo succeeded.\n")
|
||||
}
|
||||
}
|
||||
|
||||
//[START write]
|
||||
// createFile creates a file in Google Cloud Storage.
|
||||
func (d *demo) createFile(fileName string) {
|
||||
fmt.Fprintf(d.w, "Creating file /%v/%v\n", d.bucket, fileName)
|
||||
|
||||
wc := d.bucket.Object(fileName).NewWriter(d.ctx)
|
||||
wc.ContentType = "text/plain"
|
||||
wc.Metadata = map[string]string{
|
||||
"x-goog-meta-foo": "foo",
|
||||
"x-goog-meta-bar": "bar",
|
||||
}
|
||||
d.cleanUp = append(d.cleanUp, fileName)
|
||||
|
||||
if _, err := wc.Write([]byte("abcde\n")); err != nil {
|
||||
d.errorf("createFile: unable to write data to bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil {
|
||||
d.errorf("createFile: unable to write data to bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
if err := wc.Close(); err != nil {
|
||||
d.errorf("createFile: unable to close bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
//[END write]
|
||||
|
||||
//[START read]
|
||||
// readFile reads the named file in Google Cloud Storage.
|
||||
func (d *demo) readFile(fileName string) {
|
||||
io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n")
|
||||
|
||||
rc, err := d.bucket.Object(fileName).NewReader(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("readFile: unable to open file from bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
defer rc.Close()
|
||||
slurp, err := ioutil.ReadAll(rc)
|
||||
if err != nil {
|
||||
d.errorf("readFile: unable to read data from bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0])
|
||||
if len(slurp) > 1024 {
|
||||
fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:])
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%s\n", slurp)
|
||||
}
|
||||
}
|
||||
|
||||
//[END read]
|
||||
|
||||
//[START copy]
|
||||
// copyFile copies a file in Google Cloud Storage.
|
||||
func (d *demo) copyFile(fileName string) {
|
||||
copyName := fileName + "-copy"
|
||||
fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", d.bucket, fileName, d.bucketName, copyName)
|
||||
|
||||
obj, err := d.bucket.Object(copyName).CopierFrom(d.bucket.Object(fileName)).Run(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", d.bucketName, fileName, d.bucketName, copyName, err)
|
||||
return
|
||||
}
|
||||
d.cleanUp = append(d.cleanUp, copyName)
|
||||
|
||||
d.dumpStats(obj)
|
||||
}
|
||||
|
||||
//[END copy]
|
||||
|
||||
func (d *demo) dumpStats(obj *storage.ObjectAttrs) {
|
||||
fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name)
|
||||
fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType)
|
||||
fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL)
|
||||
fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner)
|
||||
fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding)
|
||||
fmt.Fprintf(d.w, "Size: %v, ", obj.Size)
|
||||
fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5)
|
||||
fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C)
|
||||
fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata)
|
||||
fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink)
|
||||
fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass)
|
||||
if !obj.Deleted.IsZero() {
|
||||
fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted)
|
||||
}
|
||||
fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated)
|
||||
}
|
||||
|
||||
//[START file_metadata]
|
||||
// statFile reads the stats of the named file in Google Cloud Storage.
|
||||
func (d *demo) statFile(fileName string) {
|
||||
io.WriteString(d.w, "\nFile stat:\n")
|
||||
|
||||
obj, err := d.bucket.Object(fileName).Attrs(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
|
||||
d.dumpStats(obj)
|
||||
}
|
||||
|
||||
//[END file_metadata]
|
||||
|
||||
// createListFiles creates files that will be used by listBucket.
|
||||
func (d *demo) createListFiles() {
|
||||
io.WriteString(d.w, "\nCreating more files for listbucket...\n")
|
||||
for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} {
|
||||
d.createFile(n)
|
||||
}
|
||||
}
|
||||
|
||||
//[START list_bucket]
|
||||
// listBucket lists the contents of a bucket in Google Cloud Storage.
|
||||
func (d *demo) listBucket() {
|
||||
io.WriteString(d.w, "\nListbucket result:\n")
|
||||
|
||||
query := &storage.Query{Prefix: "foo"}
|
||||
it := d.bucket.Objects(d.ctx, query)
|
||||
for {
|
||||
obj, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
d.errorf("listBucket: unable to list bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpStats(obj)
|
||||
}
|
||||
}
|
||||
|
||||
//[END list_bucket]
|
||||
|
||||
func (d *demo) listDir(name, indent string) {
|
||||
query := &storage.Query{Prefix: name, Delimiter: "/"}
|
||||
it := d.bucket.Objects(d.ctx, query)
|
||||
for {
|
||||
obj, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
d.errorf("listBucketDirMode: unable to list bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
if obj.Prefix == "" {
|
||||
fmt.Fprint(d.w, indent)
|
||||
d.dumpStats(obj)
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, d.bucketName, obj.Prefix)
|
||||
d.listDir(obj.Prefix, indent+" ")
|
||||
}
|
||||
}
|
||||
|
||||
// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage.
|
||||
func (d *demo) listBucketDirMode() {
|
||||
io.WriteString(d.w, "\nListbucket directory mode result:\n")
|
||||
d.listDir("b", "")
|
||||
}
|
||||
|
||||
// dumpDefaultACL prints out the default object ACL for this bucket.
|
||||
func (d *demo) dumpDefaultACL() {
|
||||
acl, err := d.bucket.ACL().List(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
for _, v := range acl {
|
||||
fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role)
|
||||
}
|
||||
}
|
||||
|
||||
// defaultACL displays the default object ACL for this bucket.
|
||||
func (d *demo) defaultACL() {
|
||||
io.WriteString(d.w, "\nDefault object ACL:\n")
|
||||
d.dumpDefaultACL()
|
||||
}
|
||||
|
||||
// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket.
|
||||
func (d *demo) putDefaultACLRule() {
|
||||
io.WriteString(d.w, "\nPut Default object ACL Rule:\n")
|
||||
err := d.bucket.DefaultObjectACL().Set(d.ctx, storage.AllUsers, storage.RoleReader)
|
||||
if err != nil {
|
||||
d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpDefaultACL()
|
||||
}
|
||||
|
||||
// deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket.
|
||||
func (d *demo) deleteDefaultACLRule() {
|
||||
io.WriteString(d.w, "\nDelete Default object ACL Rule:\n")
|
||||
err := d.bucket.DefaultObjectACL().Delete(d.ctx, storage.AllUsers)
|
||||
if err != nil {
|
||||
d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpDefaultACL()
|
||||
}
|
||||
|
||||
// dumpBucketACL prints out the bucket ACL.
|
||||
func (d *demo) dumpBucketACL() {
|
||||
acl, err := d.bucket.ACL().List(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
for _, v := range acl {
|
||||
fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role)
|
||||
}
|
||||
}
|
||||
|
||||
// bucketACL displays the bucket ACL for this bucket.
|
||||
func (d *demo) bucketACL() {
|
||||
io.WriteString(d.w, "\nBucket ACL:\n")
|
||||
d.dumpBucketACL()
|
||||
}
|
||||
|
||||
// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket.
|
||||
func (d *demo) putBucketACLRule() {
|
||||
io.WriteString(d.w, "\nPut Bucket ACL Rule:\n")
|
||||
err := d.bucket.ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader)
|
||||
if err != nil {
|
||||
d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpBucketACL()
|
||||
}
|
||||
|
||||
// deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket.
|
||||
func (d *demo) deleteBucketACLRule() {
|
||||
io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n")
|
||||
err := d.bucket.ACL().Delete(d.ctx, storage.AllUsers)
|
||||
if err != nil {
|
||||
d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", d.bucketName, err)
|
||||
return
|
||||
}
|
||||
d.dumpBucketACL()
|
||||
}
|
||||
|
||||
// dumpACL prints out the ACL of the named file.
|
||||
func (d *demo) dumpACL(fileName string) {
|
||||
acl, err := d.bucket.Object(fileName).ACL().List(d.ctx)
|
||||
if err != nil {
|
||||
d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
for _, v := range acl {
|
||||
fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role)
|
||||
}
|
||||
}
|
||||
|
||||
// acl displays the ACL for the named file.
|
||||
func (d *demo) acl(fileName string) {
|
||||
fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName)
|
||||
d.dumpACL(fileName)
|
||||
}
|
||||
|
||||
// putACLRule adds the "allUsers" ACL rule for the named file.
|
||||
func (d *demo) putACLRule(fileName string) {
|
||||
fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName)
|
||||
err := d.bucket.Object(fileName).ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader)
|
||||
if err != nil {
|
||||
d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
d.dumpACL(fileName)
|
||||
}
|
||||
|
||||
// deleteACLRule deleted the "allUsers" ACL rule for the named file.
|
||||
func (d *demo) deleteACLRule(fileName string) {
|
||||
fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName)
|
||||
err := d.bucket.Object(fileName).ACL().Delete(d.ctx, storage.AllUsers)
|
||||
if err != nil {
|
||||
d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", d.bucketName, fileName, err)
|
||||
return
|
||||
}
|
||||
d.dumpACL(fileName)
|
||||
}
|
||||
|
||||
//[START delete]
|
||||
// deleteFiles deletes all the temporary files from a bucket created by this demo.
|
||||
func (d *demo) deleteFiles() {
|
||||
io.WriteString(d.w, "\nDeleting files...\n")
|
||||
for _, v := range d.cleanUp {
|
||||
fmt.Fprintf(d.w, "Deleting file %v\n", v)
|
||||
if err := d.bucket.Object(v).Delete(d.ctx); err != nil {
|
||||
d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", d.bucketName, v, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//[END delete]
|
10
vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml
generated
vendored
@@ -1,10 +0,0 @@
runtime: go
api_version: 1
vm: true

manual_scaling:
  instances: 1

handlers:
- url: /.*
  script: _go_app
19
vendor/cloud.google.com/go/iam/admin/apiv1/doc.go
generated
vendored
@@ -15,7 +15,7 @@
// AUTO-GENERATED CODE. DO NOT EDIT.

// Package admin is an experimental, auto-generated package for the
// admin API.
// Google Identity and Access Management (IAM) API.
//
// Manages identity and access control for Google Cloud Platform resources,
// including the creation of service accounts, which you can use to
@@ -27,9 +27,18 @@ import (
	"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val string) context.Context {
	md, _ := metadata.FromContext(ctx)
func insertXGoog(ctx context.Context, val []string) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx)
	md = md.Copy()
	md["x-goog-api-client"] = []string{val}
	return metadata.NewContext(ctx, md)
	md["x-goog-api-client"] = val
	return metadata.NewOutgoingContext(ctx, md)
}

// DefaultAuthScopes reports the authentication scopes required
// by this package.
func DefaultAuthScopes() []string {
	return []string{
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/iam",
	}
}
148
vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go
generated
vendored
@@ -59,10 +59,7 @@ type IamCallOptions struct {
func defaultIamClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("iam.googleapis.com:443"),
		option.WithScopes(
			"https://www.googleapis.com/auth/cloud-platform",
			"https://www.googleapis.com/auth/iam",
		),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
@@ -80,17 +77,6 @@ func defaultIamCallOptions() *IamCallOptions {
			})
		}),
	},
	{"default", "non_idempotent"}: {
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{
				codes.Unavailable,
			}, gax.Backoff{
				Initial:    100 * time.Millisecond,
				Max:        60000 * time.Millisecond,
				Multiplier: 1.3,
			})
		}),
	},
}
return &IamCallOptions{
	ListServiceAccounts: retry[[2]string{"default", "idempotent"}],
@@ -122,7 +108,7 @@ type IamClient struct {
	CallOptions *IamCallOptions

	// The metadata to be sent with each request.
	xGoogHeader string
	xGoogHeader []string
}

// NewIamClient creates a new iam client.
@@ -174,8 +160,8 @@ func (c *IamClient) Close() error {
// use by Google-written clients.
func (c *IamClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "")
	c.xGoogHeader = gax.XGoogHeader(kv...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
}

// IamProjectPath returns the path for the project resource.
@@ -215,8 +201,9 @@ func IamKeyPath(project, serviceAccount, key string) string {
}

// ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project.
func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest) *ServiceAccountIterator {
func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest, opts ...gax.CallOption) *ServiceAccountIterator {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.ListServiceAccounts[0:len(c.CallOptions.ListServiceAccounts):len(c.CallOptions.ListServiceAccounts)], opts...)
	it := &ServiceAccountIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*adminpb.ServiceAccount, string, error) {
		var resp *adminpb.ListServiceAccountsResponse
@@ -226,11 +213,11 @@ func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListSe
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context) error {
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.iamClient.ListServiceAccounts(ctx, req)
			resp, err = c.iamClient.ListServiceAccounts(ctx, req, settings.GRPC...)
			return err
		}, c.CallOptions.ListServiceAccounts...)
		}, opts...)
		if err != nil {
			return nil, "", err
		}
@@ -249,14 +236,15 @@ func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListSe
}

// GetServiceAccount gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) {
func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.GetServiceAccount[0:len(c.CallOptions.GetServiceAccount):len(c.CallOptions.GetServiceAccount)], opts...)
	var resp *adminpb.ServiceAccount
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.GetServiceAccount(ctx, req)
		resp, err = c.iamClient.GetServiceAccount(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.GetServiceAccount...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -265,14 +253,15 @@ func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServi

// CreateServiceAccount creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount]
// and returns it.
func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest) (*adminpb.ServiceAccount, error) {
func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.CreateServiceAccount[0:len(c.CallOptions.CreateServiceAccount):len(c.CallOptions.CreateServiceAccount)], opts...)
	var resp *adminpb.ServiceAccount
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.CreateServiceAccount(ctx, req)
		resp, err = c.iamClient.CreateServiceAccount(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.CreateServiceAccount...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -284,14 +273,15 @@ func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.Creat
// Currently, only the following fields are updatable:
// `display_name` .
// The `etag` is mandatory.
func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) {
func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.UpdateServiceAccount[0:len(c.CallOptions.UpdateServiceAccount):len(c.CallOptions.UpdateServiceAccount)], opts...)
	var resp *adminpb.ServiceAccount
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.UpdateServiceAccount(ctx, req)
		resp, err = c.iamClient.UpdateServiceAccount(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.UpdateServiceAccount...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -299,25 +289,27 @@ func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.Servi
}

// DeleteServiceAccount deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
func (c *IamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest) error {
func (c *IamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest, opts ...gax.CallOption) error {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	opts = append(c.CallOptions.DeleteServiceAccount[0:len(c.CallOptions.DeleteServiceAccount):len(c.CallOptions.DeleteServiceAccount)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.iamClient.DeleteServiceAccount(ctx, req)
		_, err = c.iamClient.DeleteServiceAccount(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.DeleteServiceAccount...)
	}, opts...)
	return err
}

// ListServiceAccountKeys lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey].
func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) {
func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest, opts ...gax.CallOption) (*adminpb.ListServiceAccountKeysResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.ListServiceAccountKeys[0:len(c.CallOptions.ListServiceAccountKeys):len(c.CallOptions.ListServiceAccountKeys)], opts...)
	var resp *adminpb.ListServiceAccountKeysResponse
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.ListServiceAccountKeys(ctx, req)
		resp, err = c.iamClient.ListServiceAccountKeys(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.ListServiceAccountKeys...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -326,14 +318,15 @@ func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.Lis

// GetServiceAccountKey gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]
// by key id.
func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) {
func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.GetServiceAccountKey[0:len(c.CallOptions.GetServiceAccountKey):len(c.CallOptions.GetServiceAccountKey)], opts...)
	var resp *adminpb.ServiceAccountKey
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.GetServiceAccountKey(ctx, req)
		resp, err = c.iamClient.GetServiceAccountKey(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.GetServiceAccountKey...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -342,14 +335,15 @@ func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetSe

// CreateServiceAccountKey creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]
// and returns it.
func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) {
func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.CreateServiceAccountKey[0:len(c.CallOptions.CreateServiceAccountKey):len(c.CallOptions.CreateServiceAccountKey)], opts...)
	var resp *adminpb.ServiceAccountKey
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.CreateServiceAccountKey(ctx, req)
		resp, err = c.iamClient.CreateServiceAccountKey(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.CreateServiceAccountKey...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -357,25 +351,27 @@ func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.Cr
}

// DeleteServiceAccountKey deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey].
func (c *IamClient) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest) error {
func (c *IamClient) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest, opts ...gax.CallOption) error {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	opts = append(c.CallOptions.DeleteServiceAccountKey[0:len(c.CallOptions.DeleteServiceAccountKey):len(c.CallOptions.DeleteServiceAccountKey)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.iamClient.DeleteServiceAccountKey(ctx, req)
		_, err = c.iamClient.DeleteServiceAccountKey(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.DeleteServiceAccountKey...)
	}, opts...)
	return err
}

// SignBlob signs a blob using a service account's system-managed private key.
func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) {
func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest, opts ...gax.CallOption) (*adminpb.SignBlobResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.SignBlob[0:len(c.CallOptions.SignBlob):len(c.CallOptions.SignBlob)], opts...)
	var resp *adminpb.SignBlobResponse
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.SignBlob(ctx, req)
		resp, err = c.iamClient.SignBlob(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.SignBlob...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -384,14 +380,15 @@ func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest)

// getIamPolicy returns the IAM access control policy for a
// [ServiceAccount][google.iam.admin.v1.ServiceAccount].
func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...)
	var resp *iampb.Policy
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.GetIamPolicy(ctx, req)
		resp, err = c.iamClient.GetIamPolicy(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.GetIamPolicy...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -400,14 +397,15 @@ func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyReq

// setIamPolicy sets the IAM access control policy for a
// [ServiceAccount][google.iam.admin.v1.ServiceAccount].
func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...)
	var resp *iampb.Policy
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.SetIamPolicy(ctx, req)
		resp, err = c.iamClient.SetIamPolicy(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.SetIamPolicy...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -416,14 +414,15 @@ func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReq

// TestIamPermissions tests the specified permissions against the IAM access control policy
// for a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...)
	var resp *iampb.TestIamPermissionsResponse
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.TestIamPermissions(ctx, req)
		resp, err = c.iamClient.TestIamPermissions(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.TestIamPermissions...)
	}, opts...)
	if err != nil {
		return nil, err
	}
@@ -433,14 +432,15 @@ func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPe
// QueryGrantableRoles queries roles that can be granted on a particular resource.
// A role is grantable if it can be used as the role in a binding for a policy
// for that resource.
func (c *IamClient) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) {
func (c *IamClient) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest, opts ...gax.CallOption) (*adminpb.QueryGrantableRolesResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	opts = append(c.CallOptions.QueryGrantableRoles[0:len(c.CallOptions.QueryGrantableRoles):len(c.CallOptions.QueryGrantableRoles)], opts...)
	var resp *adminpb.QueryGrantableRolesResponse
	err := gax.Invoke(ctx, func(ctx context.Context) error {
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.iamClient.QueryGrantableRoles(ctx, req)
		resp, err = c.iamClient.QueryGrantableRoles(ctx, req, settings.GRPC...)
		return err
	}, c.CallOptions.QueryGrantableRoles...)
	}, opts...)
	if err != nil {
		return nil, err
	}
5
vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go
generated
vendored
@@ -19,6 +19,7 @@ package admin_test
import (
	"cloud.google.com/go/iam/admin/apiv1"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	adminpb "google.golang.org/genproto/googleapis/iam/admin/v1"
	iampb "google.golang.org/genproto/googleapis/iam/v1"
)
@@ -46,9 +47,11 @@ func ExampleIamClient_ListServiceAccounts() {
	it := c.ListServiceAccounts(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
			break
		}
		// TODO: Use resp.
		_ = resp
210
vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go
generated
vendored
@@ -17,17 +17,19 @@
package admin

import (
	google_protobuf "github.com/golang/protobuf/ptypes/empty"
	emptypb "github.com/golang/protobuf/ptypes/empty"
	adminpb "google.golang.org/genproto/googleapis/iam/admin/v1"
	iampb "google.golang.org/genproto/googleapis/iam/v1"
)

import (
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"os"
	"strings"
	"testing"

	"github.com/golang/protobuf/proto"
@@ -37,6 +39,8 @@ import (
	status "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	gstatus "google.golang.org/grpc/status"
)

var _ = io.EOF
@@ -58,7 +62,11 @@ type mockIamServer struct {
	resps []proto.Message
}

func (s *mockIamServer) ListServiceAccounts(_ context.Context, req *adminpb.ListServiceAccountsRequest) (*adminpb.ListServiceAccountsResponse, error) {
func (s *mockIamServer) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest) (*adminpb.ListServiceAccountsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
@@ -66,7 +74,11 @@ func (s *mockIamServer) ListServiceAccounts(_ context.Context, req *adminpb.List
	return s.resps[0].(*adminpb.ListServiceAccountsResponse), nil
}

func (s *mockIamServer) GetServiceAccount(_ context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) {
func (s *mockIamServer) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
@@ -74,7 +86,11 @@ func (s *mockIamServer) GetServiceAccount(_ context.Context, req *adminpb.GetSer
	return s.resps[0].(*adminpb.ServiceAccount), nil
}

func (s *mockIamServer) CreateServiceAccount(_ context.Context, req *adminpb.CreateServiceAccountRequest) (*adminpb.ServiceAccount, error) {
func (s *mockIamServer) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest) (*adminpb.ServiceAccount, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
@@ -82,7 +98,11 @@ func (s *mockIamServer) CreateServiceAccount(_ context.Context, req *adminpb.Cre
	return s.resps[0].(*adminpb.ServiceAccount), nil
}

func (s *mockIamServer) UpdateServiceAccount(_ context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) {
func (s *mockIamServer) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
@@ -90,15 +110,23 @@ func (s *mockIamServer) UpdateServiceAccount(_ context.Context, req *adminpb.Ser
	return s.resps[0].(*adminpb.ServiceAccount), nil
}

func (s *mockIamServer) DeleteServiceAccount(_ context.Context, req *adminpb.DeleteServiceAccountRequest) (*google_protobuf.Empty, error) {
func (s *mockIamServer) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest) (*emptypb.Empty, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*google_protobuf.Empty), nil
	return s.resps[0].(*emptypb.Empty), nil
}

func (s *mockIamServer) ListServiceAccountKeys(_ context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) {
func (s *mockIamServer) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -106,7 +134,11 @@ func (s *mockIamServer) ListServiceAccountKeys(_ context.Context, req *adminpb.L
|
|||
return s.resps[0].(*adminpb.ListServiceAccountKeysResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockIamServer) GetServiceAccountKey(_ context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) {
|
||||
func (s *mockIamServer) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -114,7 +146,11 @@ func (s *mockIamServer) GetServiceAccountKey(_ context.Context, req *adminpb.Get
|
|||
return s.resps[0].(*adminpb.ServiceAccountKey), nil
|
||||
}
|
||||
|
||||
func (s *mockIamServer) CreateServiceAccountKey(_ context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) {
|
||||
func (s *mockIamServer) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -122,15 +158,23 @@ func (s *mockIamServer) CreateServiceAccountKey(_ context.Context, req *adminpb.
|
|||
return s.resps[0].(*adminpb.ServiceAccountKey), nil
|
||||
}
|
||||
|
||||
func (s *mockIamServer) DeleteServiceAccountKey(_ context.Context, req *adminpb.DeleteServiceAccountKeyRequest) (*google_protobuf.Empty, error) {
|
||||
func (s *mockIamServer) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest) (*emptypb.Empty, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
}
|
||||
return s.resps[0].(*google_protobuf.Empty), nil
|
||||
return s.resps[0].(*emptypb.Empty), nil
|
||||
}
|
||||
|
||||
func (s *mockIamServer) SignBlob(_ context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) {
|
||||
func (s *mockIamServer) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -138,7 +182,11 @@ func (s *mockIamServer) SignBlob(_ context.Context, req *adminpb.SignBlobRequest
|
|||
return s.resps[0].(*adminpb.SignBlobResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockIamServer) GetIamPolicy(_ context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
|
||||
func (s *mockIamServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -146,7 +194,11 @@ func (s *mockIamServer) GetIamPolicy(_ context.Context, req *iampb.GetIamPolicyR
|
|||
return s.resps[0].(*iampb.Policy), nil
|
||||
}
|
||||
|
||||
func (s *mockIamServer) SetIamPolicy(_ context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
|
||||
func (s *mockIamServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -154,7 +206,11 @@ func (s *mockIamServer) SetIamPolicy(_ context.Context, req *iampb.SetIamPolicyR
|
|||
return s.resps[0].(*iampb.Policy), nil
|
||||
}
|
||||
|
||||
func (s *mockIamServer) TestIamPermissions(_ context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
|
||||
func (s *mockIamServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -162,7 +218,11 @@ func (s *mockIamServer) TestIamPermissions(_ context.Context, req *iampb.TestIam
|
|||
return s.resps[0].(*iampb.TestIamPermissionsResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockIamServer) QueryGrantableRoles(_ context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) {
|
||||
func (s *mockIamServer) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
|
||||
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
|
||||
}
|
||||
s.reqs = append(s.reqs, req)
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
|
@ -249,8 +309,8 @@ func TestIamListServiceAccounts(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamListServiceAccountsError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamProjectPath("[PROJECT]")
|
||||
var request = &adminpb.ListServiceAccountsRequest{
|
||||
|
@ -264,7 +324,9 @@ func TestIamListServiceAccountsError(t *testing.T) {
|
|||
|
||||
resp, err := c.ListServiceAccounts(context.Background(), request).Next()
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
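This hunk shows a change that repeats through the rest of mock_test.go: errors are now built with the gRPC status package (imported here as gstatus) instead of the older grpc.Errorf helper, and their codes are read back with gstatus.FromError rather than grpc.Code. A minimal standalone sketch of that idiom follows; the code and message are arbitrary examples, not values from this diff.

```go
// Sketch of the status-package idiom the updated tests rely on.
package statususage

import (
	"fmt"

	"google.golang.org/grpc/codes"
	gstatus "google.golang.org/grpc/status"
)

func demo() error {
	// gstatus.Error replaces grpc.Errorf for constructing a status error.
	err := gstatus.Error(codes.PermissionDenied, "test error")

	// gstatus.FromError replaces grpc.Code: it recovers the *status.Status
	// and reports whether err carried one at all.
	if st, ok := gstatus.FromError(err); ok {
		fmt.Println(st.Code() == codes.PermissionDenied) // prints true
	}
	return err
}
```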
|
@ -318,8 +380,8 @@ func TestIamGetServiceAccount(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamGetServiceAccountError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
|
||||
var request = &adminpb.GetServiceAccountRequest{
|
||||
|
@ -333,7 +395,9 @@ func TestIamGetServiceAccountError(t *testing.T) {
|
|||
|
||||
resp, err := c.GetServiceAccount(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -389,8 +453,8 @@ func TestIamCreateServiceAccount(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamCreateServiceAccountError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamProjectPath("[PROJECT]")
|
||||
var accountId string = "accountId-803333011"
|
||||
|
@ -406,7 +470,9 @@ func TestIamCreateServiceAccountError(t *testing.T) {
|
|||
|
||||
resp, err := c.CreateServiceAccount(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -460,8 +526,8 @@ func TestIamUpdateServiceAccount(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamUpdateServiceAccountError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var etag []byte = []byte("21")
|
||||
var request = &adminpb.ServiceAccount{
|
||||
|
@ -475,13 +541,15 @@ func TestIamUpdateServiceAccountError(t *testing.T) {
|
|||
|
||||
resp, err := c.UpdateServiceAccount(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestIamDeleteServiceAccount(t *testing.T) {
|
||||
var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{}
|
||||
var expectedResponse *emptypb.Empty = &emptypb.Empty{}
|
||||
|
||||
mockIam.err = nil
|
||||
mockIam.reqs = nil
|
||||
|
@ -511,8 +579,8 @@ func TestIamDeleteServiceAccount(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamDeleteServiceAccountError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
|
||||
var request = &adminpb.DeleteServiceAccountRequest{
|
||||
|
@ -526,7 +594,9 @@ func TestIamDeleteServiceAccountError(t *testing.T) {
|
|||
|
||||
err = c.DeleteServiceAccount(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
}
|
||||
|
@ -564,8 +634,8 @@ func TestIamListServiceAccountKeys(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamListServiceAccountKeysError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
|
||||
var request = &adminpb.ListServiceAccountKeysRequest{
|
||||
|
@ -579,7 +649,9 @@ func TestIamListServiceAccountKeysError(t *testing.T) {
|
|||
|
||||
resp, err := c.ListServiceAccountKeys(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -625,8 +697,8 @@ func TestIamGetServiceAccountKey(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamGetServiceAccountKeyError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]")
|
||||
var request = &adminpb.GetServiceAccountKeyRequest{
|
||||
|
@ -640,7 +712,9 @@ func TestIamGetServiceAccountKeyError(t *testing.T) {
|
|||
|
||||
resp, err := c.GetServiceAccountKey(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -686,8 +760,8 @@ func TestIamCreateServiceAccountKey(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamCreateServiceAccountKeyError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
|
||||
var request = &adminpb.CreateServiceAccountKeyRequest{
|
||||
|
@ -701,13 +775,15 @@ func TestIamCreateServiceAccountKeyError(t *testing.T) {
|
|||
|
||||
resp, err := c.CreateServiceAccountKey(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
}
|
||||
func TestIamDeleteServiceAccountKey(t *testing.T) {
|
||||
var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{}
|
||||
var expectedResponse *emptypb.Empty = &emptypb.Empty{}
|
||||
|
||||
mockIam.err = nil
|
||||
mockIam.reqs = nil
|
||||
|
@ -737,8 +813,8 @@ func TestIamDeleteServiceAccountKey(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamDeleteServiceAccountKeyError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]")
|
||||
var request = &adminpb.DeleteServiceAccountKeyRequest{
|
||||
|
@ -752,7 +828,9 @@ func TestIamDeleteServiceAccountKeyError(t *testing.T) {
|
|||
|
||||
err = c.DeleteServiceAccountKey(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
}
|
||||
|
@ -797,8 +875,8 @@ func TestIamSignBlob(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamSignBlobError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
|
||||
var bytesToSign []byte = []byte("45")
|
||||
|
@ -814,7 +892,9 @@ func TestIamSignBlobError(t *testing.T) {
|
|||
|
||||
resp, err := c.SignBlob(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -858,8 +938,8 @@ func TestIamGetIamPolicy(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamGetIamPolicyError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
|
||||
var request = &iampb.GetIamPolicyRequest{
|
||||
|
@ -873,7 +953,9 @@ func TestIamGetIamPolicyError(t *testing.T) {
|
|||
|
||||
resp, err := c.getIamPolicy(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -919,8 +1001,8 @@ func TestIamSetIamPolicy(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamSetIamPolicyError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
|
||||
var policy *iampb.Policy = &iampb.Policy{}
|
||||
|
@ -936,7 +1018,9 @@ func TestIamSetIamPolicyError(t *testing.T) {
|
|||
|
||||
resp, err := c.setIamPolicy(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -977,8 +1061,8 @@ func TestIamTestIamPermissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamTestIamPermissionsError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
|
||||
var permissions []string = nil
|
||||
|
@ -994,7 +1078,9 @@ func TestIamTestIamPermissionsError(t *testing.T) {
|
|||
|
||||
resp, err := c.TestIamPermissions(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
@ -1033,8 +1119,8 @@ func TestIamQueryGrantableRoles(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIamQueryGrantableRolesError(t *testing.T) {
|
||||
errCode := codes.Internal
|
||||
mockIam.err = grpc.Errorf(errCode, "test error")
|
||||
errCode := codes.PermissionDenied
|
||||
mockIam.err = gstatus.Error(errCode, "test error")
|
||||
|
||||
var fullResourceName string = "fullResourceName1300993644"
|
||||
var request = &adminpb.QueryGrantableRolesRequest{
|
||||
|
@ -1048,7 +1134,9 @@ func TestIamQueryGrantableRolesError(t *testing.T) {
|
|||
|
||||
resp, err := c.QueryGrantableRoles(context.Background(), request)
|
||||
|
||||
if c := grpc.Code(err); c != errCode {
|
||||
if st, ok := gstatus.FromError(err); !ok {
|
||||
t.Errorf("got error %v, expected grpc error", err)
|
||||
} else if c := st.Code(); c != errCode {
|
||||
t.Errorf("got error code %q, want %q", c, errCode)
|
||||
}
|
||||
_ = resp
|
||||
|
|
109 vendor/cloud.google.com/go/iam/iam.go generated vendored
|
@ -27,9 +27,47 @@ import (
|
|||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// client abstracts the IAMPolicy API to allow multiple implementations.
|
||||
type client interface {
|
||||
Get(ctx context.Context, resource string) (*pb.Policy, error)
|
||||
Set(ctx context.Context, resource string, p *pb.Policy) error
|
||||
Test(ctx context.Context, resource string, perms []string) ([]string, error)
|
||||
}
|
||||
|
||||
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
|
||||
type grpcClient struct {
|
||||
c pb.IAMPolicyClient
|
||||
}
|
||||
|
||||
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
|
||||
proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto, nil
|
||||
}
|
||||
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
|
||||
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
||||
Resource: resource,
|
||||
Policy: p,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
||||
res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
||||
Resource: resource,
|
||||
Permissions: perms,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
}
|
||||
|
||||
// A Handle provides IAM operations for a resource.
|
||||
type Handle struct {
|
||||
c pb.IAMPolicyClient
|
||||
c client
|
||||
resource string
|
||||
}
|
||||
|
||||
|
@ -38,15 +76,23 @@ type Handle struct {
|
|||
// InternalNewHandle returns a Handle for resource.
|
||||
// The conn parameter refers to a server that must support the IAMPolicy service.
|
||||
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
|
||||
return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
|
||||
}
|
||||
|
||||
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandleClient returns a Handle for resource using the given
|
||||
// client implementation.
|
||||
func InternalNewHandleClient(c client, resource string) *Handle {
|
||||
return &Handle{
|
||||
c: pb.NewIAMPolicyClient(conn),
|
||||
c: c,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
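The client interface introduced above exists, per its comment, "to allow multiple implementations": grpcClient is the standard one, and InternalNewHandleClient is the hook for supplying others. A hedged sketch of what an alternative implementation could look like — a trivial in-memory fake of the kind a test might use; the type, package name, and behavior are invented and not part of this change.

```go
// Invented in-memory stand-in for the IAMPolicy service. Its Get, Set and
// Test methods match the interface above, so a value of this type can be
// handed to InternalNewHandleClient in place of the gRPC-backed client.
package iamfake

import (
	"golang.org/x/net/context"

	pb "google.golang.org/genproto/googleapis/iam/v1"
)

type fakeClient struct {
	policies map[string]*pb.Policy // keyed by resource name
}

func newFakeClient() *fakeClient {
	return &fakeClient{policies: make(map[string]*pb.Policy)}
}

func (f *fakeClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
	if p, ok := f.policies[resource]; ok {
		return p, nil
	}
	return &pb.Policy{}, nil // unknown resources start with an empty policy
}

func (f *fakeClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
	f.policies[resource] = p
	return nil
}

func (f *fakeClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
	return perms, nil // pretend the caller holds everything it asked about
}
```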
// Policy retrieves the IAM policy for the resource.
|
||||
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
||||
proto, err := h.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: h.resource})
|
||||
proto, err := h.c.Get(ctx, h.resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -58,23 +104,12 @@ func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
|||
// If policy was created from a prior call to Get, then the modification will
|
||||
// only succeed if the policy has not changed since the Get.
|
||||
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
|
||||
_, err := h.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
||||
Resource: h.resource,
|
||||
Policy: policy.InternalProto,
|
||||
})
|
||||
return err
|
||||
return h.c.Set(ctx, h.resource, policy.InternalProto)
|
||||
}
|
||||
|
||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
||||
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
||||
res, err := h.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
||||
Resource: h.resource,
|
||||
Permissions: permissions,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
return h.c.Test(ctx, h.resource, permissions)
|
||||
}
|
||||
|
||||
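Taken together, the Handle methods above (Policy, SetPolicy, TestPermissions) form the read-modify-write surface that callers of this package use. A rough usage sketch, assuming the member and permission strings are placeholders and that the Handle itself is obtained from whichever Cloud library owns the resource:

```go
// Sketch of typical Handle usage; the member and permission strings are
// invented placeholders.
package iamusage

import (
	"log"

	"cloud.google.com/go/iam"
	"golang.org/x/net/context"
)

func grantAndCheck(ctx context.Context, h *iam.Handle) error {
	policy, err := h.Policy(ctx)
	if err != nil {
		return err
	}
	// Grant a role and write the modified policy back; SetPolicy fails if
	// the policy changed on the server since Policy read it.
	policy.Add("group:admins@example.com", iam.Owner)
	if err := h.SetPolicy(ctx, policy); err != nil {
		return err
	}

	// Ask which of these permissions the caller actually holds.
	perms, err := h.TestPermissions(ctx, []string{"resourcemanager.projects.get"})
	if err != nil {
		return err
	}
	log.Println(perms)
	return nil
}
```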
// A RoleName is a name representing a collection of permissions.
|
||||
|
@ -146,16 +181,30 @@ func (p *Policy) Add(member string, r RoleName) {
|
|||
|
||||
// Remove removes member from role r if it is present.
|
||||
func (p *Policy) Remove(member string, r RoleName) {
|
||||
b := p.binding(r)
|
||||
i := memberIndex(member, b)
|
||||
if i < 0 {
|
||||
bi := p.bindingIndex(r)
|
||||
if bi < 0 {
|
||||
return
|
||||
}
|
||||
// Order doesn't matter, so move the last member into the
|
||||
// removed spot and shrink the slice.
|
||||
bindings := p.InternalProto.Bindings
|
||||
b := bindings[bi]
|
||||
mi := memberIndex(member, b)
|
||||
if mi < 0 {
|
||||
return
|
||||
}
|
||||
// Order doesn't matter for bindings or members, so to remove, move the last item
|
||||
// into the removed spot and shrink the slice.
|
||||
if len(b.Members) == 1 {
|
||||
// Remove binding.
|
||||
last := len(bindings) - 1
|
||||
bindings[bi] = bindings[last]
|
||||
bindings[last] = nil
|
||||
p.InternalProto.Bindings = bindings[:last]
|
||||
return
|
||||
}
|
||||
// Remove member.
|
||||
// TODO(jba): worry about multiple copies of m?
|
||||
last := len(b.Members) - 1
|
||||
b.Members[i] = b.Members[last]
|
||||
b.Members[mi] = b.Members[last]
|
||||
b.Members[last] = ""
|
||||
b.Members = b.Members[:last]
|
||||
}
|
||||
|
@ -174,15 +223,23 @@ func (p *Policy) Roles() []RoleName {
|
|||
|
||||
// binding returns the Binding for the supplied role, or nil if there isn't one.
|
||||
func (p *Policy) binding(r RoleName) *pb.Binding {
|
||||
if p.InternalProto == nil {
|
||||
i := p.bindingIndex(r)
|
||||
if i < 0 {
|
||||
return nil
|
||||
}
|
||||
for _, b := range p.InternalProto.Bindings {
|
||||
return p.InternalProto.Bindings[i]
|
||||
}
|
||||
|
||||
func (p *Policy) bindingIndex(r RoleName) int {
|
||||
if p.InternalProto == nil {
|
||||
return -1
|
||||
}
|
||||
for i, b := range p.InternalProto.Bindings {
|
||||
if b.Role == string(r) {
|
||||
return b
|
||||
return i
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return -1
|
||||
}
|
||||
|
||||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
||||
|
|
4 vendor/cloud.google.com/go/iam/iam_test.go generated vendored
|
@ -62,10 +62,10 @@ func TestPolicy(t *testing.T) {
|
|||
t.Fatal(msg)
|
||||
}
|
||||
remove("m2", Owner)
|
||||
if msg, ok := checkMembers(p, Owner, []string{}); !ok {
|
||||
if msg, ok := checkMembers(p, Owner, nil); !ok {
|
||||
t.Fatal(msg)
|
||||
}
|
||||
if got, want := p.Roles(), []RoleName{Owner}; !reflect.DeepEqual(got, want) {
|
||||
if got, want := p.Roles(), []RoleName(nil); !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("roles: got %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
|
64 vendor/cloud.google.com/go/internal/cloud.go generated vendored
|
@ -1,64 +0,0 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package internal provides support for the cloud packages.
|
||||
//
|
||||
// Users should not import this package directly.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const userAgent = "gcloud-golang/0.1"
|
||||
|
||||
// Transport is an http.RoundTripper that appends Google Cloud client's
|
||||
// user-agent to the original request's user-agent header.
|
||||
type Transport struct {
|
||||
// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
|
||||
// Do User-Agent some other way.
|
||||
|
||||
// Base is the actual http.RoundTripper
|
||||
// requests will use. It must not be nil.
|
||||
Base http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip appends a user-agent to the existing user-agent
|
||||
// header and delegates the request to the base http.RoundTripper.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req = cloneRequest(req)
|
||||
ua := req.Header.Get("User-Agent")
|
||||
if ua == "" {
|
||||
ua = userAgent
|
||||
} else {
|
||||
ua = fmt.Sprintf("%s %s", ua, userAgent)
|
||||
}
|
||||
req.Header.Set("User-Agent", ua)
|
||||
return t.Base.RoundTrip(req)
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header)
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = s
|
||||
}
|
||||
return r2
|
||||
}
|
5 vendor/cloud.google.com/go/internal/kokoro/build.sh generated vendored
|
@ -38,4 +38,7 @@ go get -v ./...
|
|||
# cd $GOCLOUD_HOME
|
||||
|
||||
# Run tests and tee output to log file, to be pushed to GCS as artifact.
|
||||
go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_REVISION.log
|
||||
|
||||
|
||||
# Make sure README.md is up to date.
|
||||
make -C internal/readme test diff
|
||||
|
|
13 vendor/cloud.google.com/go/internal/pretty/pretty.go generated vendored
|
@ -25,6 +25,7 @@ import (
|
|||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Indent is the string output at each level of indentation.
|
||||
|
@ -58,7 +59,15 @@ type state struct {
|
|||
defaults bool
|
||||
}
|
||||
|
||||
const maxLevel = 100
|
||||
|
||||
var typeOfTime = reflect.TypeOf(time.Time{})
|
||||
|
||||
func fprint(w io.Writer, v reflect.Value, s state) {
|
||||
if s.level > maxLevel {
|
||||
fmt.Fprintln(w, "pretty: max nested depth exceeded")
|
||||
return
|
||||
}
|
||||
indent := strings.Repeat(Indent, s.level)
|
||||
fmt.Fprintf(w, "%s%s", indent, s.prefix)
|
||||
if isNil(v) {
|
||||
|
@ -68,6 +77,10 @@ func fprint(w io.Writer, v reflect.Value, s state) {
|
|||
if v.Type().Kind() == reflect.Interface {
|
||||
v = v.Elem()
|
||||
}
|
||||
if v.Type() == typeOfTime {
|
||||
fmt.Fprintf(w, "%s%s", v.Interface(), s.suffix)
|
||||
return
|
||||
}
|
||||
for v.Type().Kind() == reflect.Ptr {
|
||||
fmt.Fprintf(w, "&")
|
||||
v = v.Elem()
|
||||
|
|
48 vendor/cloud.google.com/go/internal/readme/Makefile generated vendored Normal file
|
@ -0,0 +1,48 @@
|
|||
# Rebuild the README.md file at repo root by inserting code samples
|
||||
# from compilable go files.
|
||||
|
||||
SHELL=/bin/bash
|
||||
|
||||
GOCLOUD_HOME=$(GOPATH)/src/cloud.google.com/go
|
||||
README=$(GOCLOUD_HOME)/README.md
|
||||
|
||||
.PHONY: readme test test-good test-bad-go test-bad-md
|
||||
|
||||
readme:
|
||||
@tmp=$$(mktemp); \
|
||||
awk -f snipmd.awk snippets.go $(README) > $$tmp; \
|
||||
mv $$tmp $(README)
|
||||
|
||||
diff:
|
||||
diff $(README) <(awk -f snipmd.awk snippets.go $(README))
|
||||
|
||||
test: test-good test-bad-go test-bad-md
|
||||
@echo PASS
|
||||
|
||||
test-good:
|
||||
@echo testdata/good.md
|
||||
@cd testdata >& /dev/null; \
|
||||
diff -u want.md <(awk -f ../snipmd.awk snips.go good.md)
|
||||
@echo "testdata/want.md (round trip)"
|
||||
@cd testdata >& /dev/null; \
|
||||
diff -u want.md <(awk -f ../snipmd.awk snips.go want.md)
|
||||
|
||||
test-bad-go:
|
||||
@for f in testdata/bad-*.go; do \
|
||||
echo $$f; \
|
||||
if awk -f snipmd.awk $$f >& /dev/null; then \
|
||||
echo "$f succeeded, want failure"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
done
|
||||
|
||||
test-bad-md:
|
||||
@for f in testdata/bad-*.md; do \
|
||||
echo $$f; \
|
||||
if awk -f snipmd.awk testdata/snips.go $$f >& /dev/null; then \
|
||||
echo "$f succeeded, want failure"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
done
|
||||
|
||||
|
123 vendor/cloud.google.com/go/internal/readme/snipmd.awk generated vendored Normal file
|
@ -0,0 +1,123 @@
|
|||
# Copyright 2017 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# snipmd inserts code snippets from Go source files into a markdown file.
|
||||
#
|
||||
# Call with one or more .go files and a .md file:
|
||||
#
|
||||
# awk -f snipmd.awk foo.go bar.go template.md
|
||||
#
|
||||
# In the Go files, start a snippet with
|
||||
# //[ NAME
|
||||
# and end it with
|
||||
# //]
|
||||
#
|
||||
# In the markdown, write
|
||||
# [snip]:# NAME
|
||||
# to insert the snippet NAME just below that line.
|
||||
# If there is already a code block after the [snip]:# line, it will be
|
||||
# replaced, so a previous output can be used as input.
|
||||
#
|
||||
# The following transformations are made to the Go code:
|
||||
# - The first tab of each line is removed.
|
||||
# - Trailing blank lines are removed.
|
||||
# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...`
|
||||
|
||||
|
||||
/^[ \t]*\/\/\[/ { # start snippet in Go file
|
||||
if (inGo()) {
|
||||
if ($2 == "") {
|
||||
die("missing snippet name")
|
||||
}
|
||||
curSnip = $2
|
||||
next
|
||||
}
|
||||
}
|
||||
|
||||
/^[ \t]*\/\/]/ { # end snippet in Go file
|
||||
if (inGo()) {
|
||||
if (curSnip != "") {
|
||||
# Remove all but one trailing newline.
|
||||
gsub(/\n+$/, "\n", snips[curSnip])
|
||||
curSnip = ""
|
||||
next
|
||||
} else {
|
||||
die("//] without corresponding //[")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ENDFILE {
|
||||
if (curSnip != "") {
|
||||
die("unclosed snippet: " curSnip)
|
||||
}
|
||||
}
|
||||
|
||||
# Skip code blocks in the input that immediately follow [snip]:# lines,
|
||||
# because we just inserted the snippet. Supports round-tripping.
|
||||
/^```go$/,/^```$/ {
|
||||
if (inMarkdown() && afterSnip) {
|
||||
next
|
||||
}
|
||||
}
|
||||
|
||||
# Matches every line.
|
||||
{
|
||||
if (curSnip != "") {
|
||||
line = $0
|
||||
# Remove initial tab, if any.
|
||||
if (line ~ /^\t/) {
|
||||
line = substr(line, 2)
|
||||
}
|
||||
# Replace ELLIPSIS.
|
||||
gsub(/_ = ELLIPSIS/, "...", line)
|
||||
gsub(/ELLIPSIS/, "...", line)
|
||||
|
||||
snips[curSnip] = snips[curSnip] line "\n"
|
||||
} else if (inMarkdown()) {
|
||||
afterSnip = 0
|
||||
# Copy .md to output.
|
||||
print
|
||||
}
|
||||
}
|
||||
|
||||
$1 ~ /\[snip\]:#/ { # Snippet marker in .md file.
|
||||
if (inMarkdown()) {
|
||||
# We expect '[snip]:#' to be followed by '(NAME)'
|
||||
if ($2 !~ /\(.*\)/) {
|
||||
die("bad snip spec: " $0)
|
||||
}
|
||||
name = substr($2, 2, length($2)-2)
|
||||
if (snips[name] == "") {
|
||||
die("no snippet named " name)
|
||||
}
|
||||
printf("```go\n%s```\n", snips[name])
|
||||
afterSnip = 1
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function inMarkdown() {
|
||||
return match(FILENAME, /\.md$/)
|
||||
}
|
||||
|
||||
function inGo() {
|
||||
return match(FILENAME, /\.go$/)
|
||||
}
|
||||
|
||||
|
||||
function die(msg) {
|
||||
printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr"
|
||||
exit 1
|
||||
}
|
241 vendor/cloud.google.com/go/internal/readme/snippets.go generated vendored Normal file
|
@ -0,0 +1,241 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file holds samples that are embedded into README.md.
|
||||
|
||||
// This file has to compile, but need not execute.
|
||||
// If it fails to compile, fix it, then run `make` to regenerate README.md.
|
||||
|
||||
package readme
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigquery"
|
||||
"cloud.google.com/go/datastore"
|
||||
"cloud.google.com/go/logging"
|
||||
"cloud.google.com/go/pubsub"
|
||||
"cloud.google.com/go/spanner"
|
||||
"cloud.google.com/go/storage"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
var ctx context.Context
|
||||
|
||||
const END = 0
|
||||
|
||||
func auth() {
|
||||
//[ auth
|
||||
client, err := storage.NewClient(ctx)
|
||||
//]
|
||||
_ = client
|
||||
_ = err
|
||||
}
|
||||
|
||||
func auth2() {
|
||||
//[ auth-JSON
|
||||
client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
|
||||
//]
|
||||
_ = client
|
||||
_ = err
|
||||
}
|
||||
|
||||
func auth3() {
|
||||
var ELLIPSIS oauth2.TokenSource
|
||||
//[ auth-ts
|
||||
tokenSource := ELLIPSIS
|
||||
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
|
||||
//]
|
||||
_ = client
|
||||
_ = err
|
||||
}
|
||||
|
||||
func datastoreSnippets() {
|
||||
//[ datastore-1
|
||||
client, err := datastore.NewClient(ctx, "my-project-id")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
//]
|
||||
|
||||
//[ datastore-2
|
||||
type Post struct {
|
||||
Title string
|
||||
Body string `datastore:",noindex"`
|
||||
PublishedAt time.Time
|
||||
}
|
||||
keys := []*datastore.Key{
|
||||
datastore.NameKey("Post", "post1", nil),
|
||||
datastore.NameKey("Post", "post2", nil),
|
||||
}
|
||||
posts := []*Post{
|
||||
{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
|
||||
{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
|
||||
}
|
||||
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
//]
|
||||
}
|
||||
|
||||
func storageSnippets() {
|
||||
//[ storage-1
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
//]
|
||||
|
||||
//[ storage-2
|
||||
// Read the object1 from bucket.
|
||||
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer rc.Close()
|
||||
body, err := ioutil.ReadAll(rc)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
//]
|
||||
_ = body
|
||||
}
|
||||
|
||||
func pubsubSnippets() {
|
||||
//[ pubsub-1
|
||||
client, err := pubsub.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
//]
|
||||
|
||||
const ELLIPSIS = 0
|
||||
|
||||
//[ pubsub-2
|
||||
// Publish "hello world" on topic1.
|
||||
topic := client.Topic("topic1")
|
||||
res := topic.Publish(ctx, &pubsub.Message{
|
||||
Data: []byte("hello world"),
|
||||
})
|
||||
// The publish happens asynchronously.
|
||||
// Later, you can get the result from res:
|
||||
_ = ELLIPSIS
|
||||
msgID, err := res.Get(ctx)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Use a callback to receive messages via subscription1.
|
||||
sub := client.Subscription("subscription1")
|
||||
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
|
||||
fmt.Println(m.Data)
|
||||
m.Ack() // Acknowledge that we've consumed the message.
|
||||
})
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
//]
|
||||
_ = msgID
|
||||
}
|
||||
|
||||
func bqSnippets() {
|
||||
//[ bq-1
|
||||
c, err := bigquery.NewClient(ctx, "my-project-ID")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
//]
|
||||
|
||||
//[ bq-2
|
||||
// Construct a query.
|
||||
q := c.Query(`
|
||||
SELECT year, SUM(number)
|
||||
FROM [bigquery-public-data:usa_names.usa_1910_2013]
|
||||
WHERE name = "William"
|
||||
GROUP BY year
|
||||
ORDER BY year
|
||||
`)
|
||||
// Execute the query.
|
||||
it, err := q.Read(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Iterate through the results.
|
||||
for {
|
||||
var values []bigquery.Value
|
||||
err := it.Next(&values)
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(values)
|
||||
}
|
||||
//]
|
||||
}
|
||||
|
||||
func loggingSnippets() {
|
||||
//[ logging-1
|
||||
ctx := context.Background()
|
||||
client, err := logging.NewClient(ctx, "my-project")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
//]
|
||||
//[ logging-2
|
||||
logger := client.Logger("my-log")
|
||||
logger.Log(logging.Entry{Payload: "something happened!"})
|
||||
//]
|
||||
|
||||
//[ logging-3
|
||||
err = client.Close()
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
//]
|
||||
}
|
||||
|
||||
func spannerSnippets() {
|
||||
//[ spanner-1
|
||||
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
//]
|
||||
|
||||
//[ spanner-2
|
||||
// Simple Reads And Writes
|
||||
_, err = client.Apply(ctx, []*spanner.Mutation{
|
||||
spanner.Insert("Users",
|
||||
[]string{"name", "email"},
|
||||
[]interface{}{"alice", "a@example.com"})})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
row, err := client.Single().ReadRow(ctx, "Users",
|
||||
spanner.Key{"alice"}, []string{"email"})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
//]
|
||||
_ = row
|
||||
}
|
23 vendor/cloud.google.com/go/internal/readme/testdata/bad-no-name.go generated vendored Normal file
|
@ -0,0 +1,23 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package readme
|
||||
|
||||
import "fmt"
|
||||
|
||||
func f() {
|
||||
//[
|
||||
fmt.Println()
|
||||
//]
|
||||
}
|
19 vendor/cloud.google.com/go/internal/readme/testdata/bad-no-open.go generated vendored Normal file
|
@ -0,0 +1,19 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package readme
|
||||
|
||||
func f() {
|
||||
//]
|
||||
}
|
2 vendor/cloud.google.com/go/internal/readme/testdata/bad-nosnip.md generated vendored Normal file
|
@ -0,0 +1,2 @@
|
|||
[snip]:# (unknown)
|
||||
|
1 vendor/cloud.google.com/go/internal/readme/testdata/bad-spec.md generated vendored Normal file
|
@ -0,0 +1 @@
|
|||
[snip]:# missing-parens
|
21 vendor/cloud.google.com/go/internal/readme/testdata/bad-unclosed.go generated vendored Normal file
|
@ -0,0 +1,21 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package readme
|
||||
|
||||
// unclosed snippet
|
||||
|
||||
func f() {
|
||||
//[ X
|
||||
}
|
18 vendor/cloud.google.com/go/internal/readme/testdata/good.md generated vendored Normal file
|
@ -0,0 +1,18 @@
|
|||
This template is for testing snipmd.awk.
|
||||
|
||||
Put the first snippet here.
|
||||
|
||||
[snip]:# (first)
|
||||
|
||||
And now the second.
|
||||
[snip]:# (second)
|
||||
|
||||
A top-level snippet.
|
||||
|
||||
[snip]:# (top-level)
|
||||
|
||||
```go
|
||||
// A code block that is not included.
|
||||
```
|
||||
|
||||
And we're done.
|
39 vendor/cloud.google.com/go/internal/readme/testdata/snips.go generated vendored Normal file
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package readme
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func f() {
|
||||
ELLIPSIS := 3
|
||||
//[ first
|
||||
fmt.Println("hello")
|
||||
x := ELLIPSIS
|
||||
//]
|
||||
|
||||
//[ second
|
||||
if x > 2 {
|
||||
_ = ELLIPSIS
|
||||
}
|
||||
//]
|
||||
}
|
||||
|
||||
//[ top-level
|
||||
var ErrBad = errors.New("bad")
|
||||
|
||||
//]
|
30 vendor/cloud.google.com/go/internal/readme/testdata/want.md generated vendored Normal file
|
@ -0,0 +1,30 @@
|
|||
This template is for testing snipmd.awk.
|
||||
|
||||
Put the first snippet here.
|
||||
|
||||
[snip]:# (first)
|
||||
```go
|
||||
fmt.Println("hello")
|
||||
x := ...
|
||||
```
|
||||
|
||||
And now the second.
|
||||
[snip]:# (second)
|
||||
```go
|
||||
if x > 2 {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
A top-level snippet.
|
||||
|
||||
[snip]:# (top-level)
|
||||
```go
|
||||
var ErrBad = errors.New("bad")
|
||||
```
|
||||
|
||||
```go
|
||||
// A code block that is not included.
|
||||
```
|
||||
|
||||
And we're done.
|
3 vendor/cloud.google.com/go/internal/retry.go generated vendored
|
@ -27,7 +27,8 @@ import (
|
|||
// backoff parameters. It returns when one of the following occurs:
|
||||
// When f's first return value is true, Retry immediately returns with f's second
|
||||
// return value.
|
||||
// When the provided context is done, Retry returns with ctx.Err().
|
||||
// When the provided context is done, Retry returns with an error that
|
||||
// includes both ctx.Err() and the last error returned by f.
|
||||
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
|
||||
return retry(ctx, bo, f, gax.Sleep)
|
||||
}
|
||||
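The reworded comment above pins down Retry's contract: a true first return value from f ends the loop with f's error, while a done context ends it with an error covering both the context error and f's last failure. A minimal caller sketch, written as if it lived inside this internal package; the operation and backoff values are invented, and the gax import path is assumed to be github.com/googleapis/gax-go, which this file's gax.Backoff/gax.Sleep usage points to.

```go
package internal

import (
	"time"

	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
)

// doFlakyThing stands in for some fallible operation; it is a placeholder.
func doFlakyThing() error { return nil }

// retryFlakyThing calls doFlakyThing until it succeeds or ctx is done,
// backing off between attempts as configured.
func retryFlakyThing(ctx context.Context) error {
	bo := gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        5 * time.Second,
		Multiplier: 2,
	}
	return Retry(ctx, bo, func() (stop bool, err error) {
		if err := doFlakyThing(); err != nil {
			return false, err // transient: sleep per bo, then try again
		}
		return true, nil // success: Retry returns nil
	})
}
```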
|
|
Some files were not shown because too many files have changed in this diff.