Update vendored library cloud.google.com/go

parent a4ff591165
commit a951e7b126

221 changed files with 19911 additions and 2075 deletions
Gopkg.lock | 108 (generated)

@@ -4,18 +4,14 @@
 [[projects]]
   branch = "master"
   name = "bazil.org/fuse"
-  packages = [
-    ".",
-    "fs",
-    "fuseutil"
-  ]
+  packages = [".","fs","fuseutil"]
   revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"

 [[projects]]
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
-  revision = "767c40d6a2e058483c25fa193e963a22da17236d"
-  version = "v0.18.0"
+  revision = "4b98a6370e36d7a85192e7bad08a4ebd82eac2a8"
+  version = "v0.20.0"

 [[projects]]
   name = "github.com/Azure/azure-sdk-for-go"

@@ -25,12 +21,7 @@

 [[projects]]
   name = "github.com/Azure/go-autorest"
-  packages = [
-    "autorest",
-    "autorest/adal",
-    "autorest/azure",
-    "autorest/date"
-  ]
+  packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
   revision = "c2a68353555b68de3ee8455a4fd3e890a0ac6d99"
   version = "v9.8.1"

@@ -96,12 +87,7 @@

 [[projects]]
   name = "github.com/kurin/blazer"
-  packages = [
-    "b2",
-    "base",
-    "internal/b2types",
-    "internal/blog"
-  ]
+  packages = ["b2","base","internal/b2types","internal/blog"]
   revision = "cd0304efa98725679cf68422cefa328d3d96f2f4"
   version = "v0.3.0"

@@ -112,15 +98,7 @@

 [[projects]]
   name = "github.com/minio/minio-go"
-  packages = [
-    ".",
-    "pkg/credentials",
-    "pkg/encrypt",
-    "pkg/policy",
-    "pkg/s3signer",
-    "pkg/s3utils",
-    "pkg/set"
-  ]
+  packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"]
   revision = "14f1d472d115bac5ca4804094aa87484a72ced61"
   version = "4.0.6"

@@ -186,10 +164,7 @@

 [[projects]]
   name = "github.com/spf13/cobra"
-  packages = [
-    ".",
-    "doc"
-  ]
+  packages = [".","doc"]
   revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
   version = "v0.0.1"

@@ -202,40 +177,19 @@
 [[projects]]
   branch = "master"
   name = "golang.org/x/crypto"
-  packages = [
-    "curve25519",
-    "ed25519",
-    "ed25519/internal/edwards25519",
-    "internal/chacha20",
-    "pbkdf2",
-    "poly1305",
-    "scrypt",
-    "ssh",
-    "ssh/terminal"
-  ]
+  packages = ["curve25519","ed25519","ed25519/internal/edwards25519","internal/chacha20","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"]
   revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b"

 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
-  packages = [
-    "context",
-    "context/ctxhttp",
-    "idna",
-    "lex/httplex"
-  ]
+  packages = ["context","context/ctxhttp","idna","lex/httplex"]
   revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"

 [[projects]]
   branch = "master"
   name = "golang.org/x/oauth2"
-  packages = [
-    ".",
-    "google",
-    "internal",
-    "jws",
-    "jwt"
-  ]
+  packages = [".","google","internal","jws","jwt"]
   revision = "b28fcf2b08a19742b43084fb40ab78ac6c3d8067"

 [[projects]]

@@ -247,58 +201,24 @@
 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
-  packages = [
-    "unix",
-    "windows"
-  ]
+  packages = ["unix","windows"]
   revision = "af50095a40f9041b3b38960738837185c26e9419"

 [[projects]]
   branch = "master"
   name = "golang.org/x/text"
-  packages = [
-    "collate",
-    "collate/build",
-    "internal/colltab",
-    "internal/gen",
-    "internal/tag",
-    "internal/triegen",
-    "internal/ucd",
-    "language",
-    "secure/bidirule",
-    "transform",
-    "unicode/bidi",
-    "unicode/cldr",
-    "unicode/norm",
-    "unicode/rangetable"
-  ]
+  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
   revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"

 [[projects]]
   branch = "master"
   name = "google.golang.org/api"
-  packages = [
-    "gensupport",
-    "googleapi",
-    "googleapi/internal/uritemplates",
-    "storage/v1"
-  ]
+  packages = ["gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
   revision = "65b0d8655182691ad23b4fac11e6f7b897d9b634"

 [[projects]]
   name = "google.golang.org/appengine"
-  packages = [
-    ".",
-    "internal",
-    "internal/app_identity",
-    "internal/base",
-    "internal/datastore",
-    "internal/log",
-    "internal/modules",
-    "internal/remote_api",
-    "internal/urlfetch",
-    "urlfetch"
-  ]
+  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
   revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
   version = "v1.0.0"
vendor/cloud.google.com/go/README.md | 106 (generated, vendored)

@@ -33,6 +33,69 @@ make backwards-incompatible changes.
 
 ## News
 
+_March 22, 2018_
+
+*v0.20.0*
+
+- bigquery: Support SchemaUpdateOptions for load jobs.
+
+- bigtable:
+  - Add SampleRowKeys.
+  - cbt: Support union, intersection GCPolicy.
+  - Retry admin RPCs.
+  - Add trace spans to retries.
+
+- datastore: Add OpenCensus tracing.
+
+- firestore:
+  - Fix queries involving Null and NaN.
+  - Allow Timestamp protobuffers for time values.
+
+- logging: Add a WriteTimeout option.
+
+- spanner: Support Batch API.
+
+- storage: Add OpenCensus tracing.
+
+
+_February 26, 2018_
+
+*v0.19.0*
+
+- bigquery:
+  - Support customer-managed encryption keys.
+
+- bigtable:
+  - Improved emulator support.
+  - Support GetCluster.
+
+- datastore:
+  - Add general mutations.
+  - Support pointer struct fields.
+  - Support transaction options.
+
+- firestore:
+  - Add Transaction.GetAll.
+  - Support document cursors.
+
+- logging:
+  - Support concurrent RPCs to the service.
+  - Support per-entry resources.
+
+- profiler:
+  - Add config options to disable heap and thread profiling.
+  - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set.
+
+- pubsub:
+  - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the
+    callback returns).
+  - Add SubscriptionInProject.
+  - Add OpenCensus instrumentation for streaming pull.
+
+- storage:
+  - Support CORS.
+
+
 _January 18, 2018_
 
 *v0.18.0*

@@ -97,45 +160,6 @@ _December 11, 2017_
   - TimePartitioning supports "Field".
 
-
-_October 30, 2017_
-
-*v0.16.0*
-
-- Other bigquery changes:
-  - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
-  - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
-    Legacy SQL.
-  - Uploader.Put will generate a random insert ID if you do not provide one.
-  - Support time partitioning for load jobs.
-  - Support dry-run queries.
-  - A `Job` remembers its last retrieved status.
-  - Support retrieving job configuration.
-  - Support labels for jobs and tables.
-  - Support dataset access lists.
-  - Improve support for external data sources, including data from Bigtable and
-    Google Sheets, and tables with external data.
-  - Support updating a table's view configuration.
-  - Fix uploading civil times with nanoseconds.
-
-- storage:
-  - Support PubSub notifications.
-  - Support Requester Pays buckets.
-
-- profiler: Support goroutine and mutex profile types.
-
-
-_October 3, 2017_
-
-*v0.15.0*
-
-- firestore: beta release. See the
-  [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).
-
-- errorreporting: The existing package has been redesigned.
-
-- errors: This package has been removed. Use errorreporting.
-
 [Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
 
 ## Supported APIs

@@ -143,7 +167,7 @@ _October 3, 2017_
 Google API                       | Status       | Package
 ---------------------------------|--------------|-----------------------------------------------------------
 [BigQuery][cloud-bigquery]       | stable       | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
-[Bigtable][cloud-bigtable]       | beta         | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
+[Bigtable][cloud-bigtable]       | stable       | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
 [Container][cloud-container]     | alpha        | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
 [Data Loss Prevention][cloud-dlp]| alpha        | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref]
 [Datastore][cloud-datastore]     | stable       | [`cloud.google.com/go/datastore`][cloud-datastore-ref]

@@ -153,7 +177,7 @@ Google API | Status | Package
 [Language][cloud-language]       | stable       | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
 [Logging][cloud-logging]         | stable       | [`cloud.google.com/go/logging`][cloud-logging-ref]
 [Monitoring][cloud-monitoring]   | beta         | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
 [OS Login][cloud-oslogin]        | alpha        | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref]
 [Pub/Sub][cloud-pubsub]          | beta         | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
 [Spanner][cloud-spanner]         | stable       | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
 [Speech][cloud-speech]           | stable       | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
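The v0.20.0 notes above mention SchemaUpdateOptions for load jobs. A minimal sketch of what that could look like from client code, assuming the `LoadConfig.SchemaUpdateOptions` field this release describes takes the REST API's string values; the project, dataset, table, and GCS URI are placeholders:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	gcsRef := bigquery.NewGCSReference("gs://bucket/data.json") // placeholder URI
	gcsRef.SourceFormat = bigquery.JSON
	loader := client.Dataset("my_dataset").Table("my_table").LoaderFrom(gcsRef)
	// Let the load job add new fields to the destination schema
	// (assumption: the option strings mirror the underlying REST API).
	loader.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
	job, err := loader.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := job.Wait(ctx); err != nil {
		log.Fatal(err)
	}
}
```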
vendor/cloud.google.com/go/bigquery/bigquery.go | 7 (generated, vendored)

@@ -47,6 +47,11 @@ func setClientHeader(headers http.Header) {
 
 // Client may be used to perform BigQuery operations.
 type Client struct {
+	// Location, if set, will be used as the default location for all subsequent
+	// dataset creation and job operations. A location specified directly in one of
+	// those operations will override this value.
+	Location string
+
 	projectID string
 	bqs       *bq.Service
 }

@@ -152,5 +157,5 @@ func retryableError(err error) bool {
 	if len(e.Errors) > 0 {
 		reason = e.Errors[0].Reason
 	}
-	return reason == "backendError" || reason == "rateLimitExceeded"
+	return e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
 }
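The new Client.Location is a plain struct field, so it can be set after NewClient; the same file's retryableError change additionally treats HTTP 502 (http.StatusBadGateway) as retryable. A minimal sketch of the field in use, in the style of the package's examples (the example name and project are illustrative, not part of the commit):

```go
func ExampleClient_location() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// Default all subsequent dataset creation and job operations to Tokyo.
	// A location set directly on an operation still overrides this.
	client.Location = "asia-northeast1"
	q := client.Query("SELECT 17")
	_ = q
	// TODO: Call Query.Run or Query.Read; the job is created in
	// asia-northeast1 unless the job itself specifies a location.
}
```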
vendor/cloud.google.com/go/bigquery/copy.go | 15 (generated, vendored)

@@ -37,6 +37,9 @@ type CopyConfig struct {
 
 	// The labels associated with this job.
 	Labels map[string]string
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	DestinationEncryptionConfig *EncryptionConfig
 }
 
 func (c *CopyConfig) toBQ() *bq.JobConfiguration {

@@ -47,10 +50,11 @@ func (c *CopyConfig) toBQ() *bq.JobConfiguration {
 	return &bq.JobConfiguration{
 		Labels: c.Labels,
 		Copy: &bq.JobConfigurationTableCopy{
 			CreateDisposition: string(c.CreateDisposition),
 			WriteDisposition:  string(c.WriteDisposition),
 			DestinationTable:  c.Dst.toBQ(),
-			SourceTables:      ts,
+			DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(),
+			SourceTables:                       ts,
 		},
 	}
 }

@@ -61,6 +65,7 @@ func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
 		CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
 		WriteDisposition:  TableWriteDisposition(q.Copy.WriteDisposition),
 		Dst:               bqToTable(q.Copy.DestinationTable, c),
+		DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration),
 	}
 	for _, t := range q.Copy.SourceTables {
 		cc.Srcs = append(cc.Srcs, bqToTable(t, c))

@@ -95,7 +100,7 @@ func (c *Copier) Run(ctx context.Context) (*Job, error) {
 
 func (c *Copier) newJob() *bq.Job {
 	return &bq.Job{
-		JobReference:  c.JobIDConfig.createJobRef(c.c.projectID),
+		JobReference:  c.JobIDConfig.createJobRef(c.c),
 		Configuration: c.CopyConfig.toBQ(),
 	}
 }
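Putting the new CopyConfig field to use: a minimal sketch of a copy job whose destination table is encrypted with a Cloud KMS key. The dataset, table, and key names are placeholders:

```go
func ExampleCopier_encryptionKey() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	copier := ds.Table("dst").CopierFrom(ds.Table("src"))
	// Encrypt the copy's destination with a customer-managed key.
	copier.DestinationEncryptionConfig = &bigquery.EncryptionConfig{
		KMSKeyName: "projects/P/locations/L/keyRings/R/cryptoKeys/K",
	}
	if _, err := copier.Run(ctx); err != nil {
		// TODO: Handle error.
	}
}
```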
vendor/cloud.google.com/go/bigquery/copy_test.go | 40 (generated, vendored)

@@ -49,11 +49,12 @@ func defaultCopyJob() *bq.Job {
 func TestCopy(t *testing.T) {
 	defer fixRandomID("RANDOM")()
 	testCases := []struct {
 		dst      *Table
 		srcs     []*Table
 		jobID    string
-		config   CopyConfig
-		want     *bq.Job
+		location string
+		config   CopyConfig
+		want     *bq.Job
 	}{
 		{
 			dst: &Table{

@@ -84,15 +85,17 @@ func TestCopy(t *testing.T) {
 				},
 			},
 			config: CopyConfig{
 				CreateDisposition: CreateNever,
 				WriteDisposition:  WriteTruncate,
-				Labels:            map[string]string{"a": "b"},
+				DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
+				Labels:                      map[string]string{"a": "b"},
 			},
 			want: func() *bq.Job {
 				j := defaultCopyJob()
 				j.Configuration.Labels = map[string]string{"a": "b"}
 				j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
 				j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
+				j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
 				return j
 			}(),
 		},

@@ -116,12 +119,33 @@ func TestCopy(t *testing.T) {
 				return j
 			}(),
 		},
+		{
+			dst: &Table{
+				ProjectID: "d-project-id",
+				DatasetID: "d-dataset-id",
+				TableID:   "d-table-id",
+			},
+			srcs: []*Table{
+				{
+					ProjectID: "s-project-id",
+					DatasetID: "s-dataset-id",
+					TableID:   "s-table-id",
+				},
+			},
+			location: "asia-northeast1",
+			want: func() *bq.Job {
+				j := defaultCopyJob()
+				j.JobReference.Location = "asia-northeast1"
+				return j
+			}(),
+		},
 	}
 	c := &Client{projectID: "client-project-id"}
 	for i, tc := range testCases {
 		tc.dst.c = c
 		copier := tc.dst.CopierFrom(tc.srcs...)
 		copier.JobID = tc.jobID
+		copier.Location = tc.location
 		tc.config.Srcs = tc.srcs
 		tc.config.Dst = tc.dst
 		copier.CopyConfig = tc.config
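The added test case exercises per-job locations. In user code that corresponds to setting Location on the Copier (via the embedded JobIDConfig, which gains the field in this release); a fragment sketch with placeholder tables and context:

```go
copier := dst.CopierFrom(src)       // dst, src: *bigquery.Table placeholders
copier.Location = "asia-northeast1" // recorded in the job's JobReference
job, err := copier.Run(ctx)         // ctx: a context.Context placeholder
_, _ = job, err
```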
vendor/cloud.google.com/go/bigquery/dataset.go | 4 (generated, vendored)

@@ -91,6 +91,10 @@ func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
 		return err
 	}
 	ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
+	// Use Client.Location as a default.
+	if ds.Location == "" {
+		ds.Location = d.c.Location
+	}
 	call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
 	setClientHeader(call.Header())
 	_, err = call.Do()
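A fragment sketch of the defaulting rule this hunk introduces — an explicit DatasetMetadata.Location still wins over Client.Location (client, ctx, and dataset IDs are placeholders):

```go
client.Location = "asia-northeast1"
// Inherits asia-northeast1 from the client:
err1 := client.Dataset("d1").Create(ctx, &bigquery.DatasetMetadata{})
// An explicit location overrides the client default:
err2 := client.Dataset("d2").Create(ctx, &bigquery.DatasetMetadata{Location: "US"})
_, _ = err1, err2
```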
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go | 2 (generated, vendored)

@@ -55,7 +55,7 @@ func TestDataTransferServiceSmoke(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", projectId, "us-central1")
+	var formattedParent string = fmt.Sprintf("projects/%s", projectId)
 	var request = &datatransferpb.ListDataSourcesRequest{
 		Parent: formattedParent,
 	}
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go | 2 (generated, vendored)

@@ -294,7 +294,7 @@ func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.Li
 	return it
 }
 
-// ScheduleTransferRuns creates transfer runs for a time range [range_start_time, range_end_time].
+// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
 // For each date - or whatever granularity the data source supports - in the
 // range, one transfer run is created.
 // Note that runs are created per UTC time in the time range.
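For orientation, a minimal sketch of calling ScheduleTransferRuns over such a [start_time, end_time] range, built from the request type seen in the tests below. The parent name is a placeholder, and the import paths are assumed to be the ones this vendored snapshot would use:

```go
import (
	"context"
	"time"

	datatransfer "cloud.google.com/go/bigquery/datatransfer/apiv1"
	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
	datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)

// scheduleLastDay asks the service to create one transfer run per period
// (per UTC time) in the last 24 hours.
func scheduleLastDay(ctx context.Context, c *datatransfer.Client) error {
	end := time.Now().UTC()
	start := end.Add(-24 * time.Hour)
	_, err := c.ScheduleTransferRuns(ctx, &datatransferpb.ScheduleTransferRunsRequest{
		// Non-location-qualified parent, matching the updated tests above.
		Parent:    "projects/my-project/transferConfigs/my-config", // placeholder
		StartTime: &timestamppb.Timestamp{Seconds: start.Unix()},
		EndTime:   &timestamppb.Timestamp{Seconds: end.Unix()},
	})
	return err
}
```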
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go | 2 (generated, vendored)

@@ -42,8 +42,6 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
 // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
 func DefaultAuthScopes() []string {
 	return []string{
-		"https://www.googleapis.com/auth/bigquery",
 		"https://www.googleapis.com/auth/cloud-platform",
-		"https://www.googleapis.com/auth/cloud-platform.read-only",
 	}
 }
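With the bigquery and read-only scopes dropped, cloud-platform is the lone default. A caller who still wants a narrower scope can pass one explicitly at client construction; a fragment sketch assuming google.golang.org/api/option, which is not part of this diff:

```go
client, err := datatransfer.NewClient(ctx,
	option.WithScopes("https://www.googleapis.com/auth/cloud-platform.read-only"))
if err != nil {
	// TODO: Handle error.
}
_ = client
```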
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go | 48 (generated, vendored)

@@ -281,7 +281,7 @@ func TestDataTransferServiceGetDataSource(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
+	var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
 	var request = &datatransferpb.GetDataSourceRequest{
 		Name: formattedName,
 	}

@@ -310,7 +310,7 @@ func TestDataTransferServiceGetDataSourceError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
+	var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
 	var request = &datatransferpb.GetDataSourceRequest{
 		Name: formattedName,
 	}

@@ -343,7 +343,7 @@ func TestDataTransferServiceListDataSources(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
 	var request = &datatransferpb.ListDataSourcesRequest{
 		Parent: formattedParent,
 	}

@@ -382,7 +382,7 @@ func TestDataTransferServiceListDataSourcesError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
 	var request = &datatransferpb.ListDataSourcesRequest{
 		Parent: formattedParent,
 	}

@@ -428,7 +428,7 @@ func TestDataTransferServiceCreateTransferConfig(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
 	var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{}
 	var request = &datatransferpb.CreateTransferConfigRequest{
 		Parent: formattedParent,

@@ -459,7 +459,7 @@ func TestDataTransferServiceCreateTransferConfigError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
 	var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{}
 	var request = &datatransferpb.CreateTransferConfigRequest{
 		Parent: formattedParent,

@@ -567,7 +567,7 @@ func TestDataTransferServiceDeleteTransferConfig(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
+	var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
 	var request = &datatransferpb.DeleteTransferConfigRequest{
 		Name: formattedName,
 	}

@@ -593,7 +593,7 @@ func TestDataTransferServiceDeleteTransferConfigError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
+	var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
 	var request = &datatransferpb.DeleteTransferConfigRequest{
 		Name: formattedName,
 	}

@@ -638,7 +638,7 @@ func TestDataTransferServiceGetTransferConfig(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
+	var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
 	var request = &datatransferpb.GetTransferConfigRequest{
 		Name: formattedName,
 	}

@@ -667,7 +667,7 @@ func TestDataTransferServiceGetTransferConfigError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
+	var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
 	var request = &datatransferpb.GetTransferConfigRequest{
 		Name: formattedName,
 	}

@@ -700,7 +700,7 @@ func TestDataTransferServiceListTransferConfigs(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
 	var request = &datatransferpb.ListTransferConfigsRequest{
 		Parent: formattedParent,
 	}

@@ -739,7 +739,7 @@ func TestDataTransferServiceListTransferConfigsError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
+	var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
 	var request = &datatransferpb.ListTransferConfigsRequest{
 		Parent: formattedParent,
 	}

@@ -766,7 +766,7 @@ func TestDataTransferServiceScheduleTransferRuns(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
+	var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
 	var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
 	var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
 	var request = &datatransferpb.ScheduleTransferRunsRequest{

@@ -799,7 +799,7 @@ func TestDataTransferServiceScheduleTransferRunsError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
+	var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
 	var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
 	var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
 	var request = &datatransferpb.ScheduleTransferRunsRequest{

@@ -841,7 +841,7 @@ func TestDataTransferServiceGetTransferRun(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
+	var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
 	var request = &datatransferpb.GetTransferRunRequest{
 		Name: formattedName,
 	}

@@ -870,7 +870,7 @@ func TestDataTransferServiceGetTransferRunError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
+	var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
 	var request = &datatransferpb.GetTransferRunRequest{
 		Name: formattedName,
 	}

@@ -897,7 +897,7 @@ func TestDataTransferServiceDeleteTransferRun(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
+	var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
 	var request = &datatransferpb.DeleteTransferRunRequest{
 		Name: formattedName,
 	}

@@ -923,7 +923,7 @@ func TestDataTransferServiceDeleteTransferRunError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
+	var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
 	var request = &datatransferpb.DeleteTransferRunRequest{
 		Name: formattedName,
 	}

@@ -955,7 +955,7 @@ func TestDataTransferServiceListTransferRuns(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
+	var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
 	var request = &datatransferpb.ListTransferRunsRequest{
 		Parent: formattedParent,
 	}

@@ -994,7 +994,7 @@ func TestDataTransferServiceListTransferRunsError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
+	var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
 	var request = &datatransferpb.ListTransferRunsRequest{
 		Parent: formattedParent,
 	}

@@ -1027,7 +1027,7 @@ func TestDataTransferServiceListTransferLogs(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
+	var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
 	var request = &datatransferpb.ListTransferLogsRequest{
 		Parent: formattedParent,
 	}

@@ -1066,7 +1066,7 @@ func TestDataTransferServiceListTransferLogsError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
+	var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
 	var request = &datatransferpb.ListTransferLogsRequest{
 		Parent: formattedParent,
 	}

@@ -1096,7 +1096,7 @@ func TestDataTransferServiceCheckValidCreds(t *testing.T) {
 
 	mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
+	var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
 	var request = &datatransferpb.CheckValidCredsRequest{
 		Name: formattedName,
 	}

@@ -1125,7 +1125,7 @@ func TestDataTransferServiceCheckValidCredsError(t *testing.T) {
 	errCode := codes.PermissionDenied
 	mockDataTransfer.err = gstatus.Error(errCode, "test error")
 
-	var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
+	var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
 	var request = &datatransferpb.CheckValidCredsRequest{
 		Name: formattedName,
 	}
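Every hunk in this file makes the same mechanical substitution: the regenerated tests drop the locations/%s segment from resource names. The pattern in isolation:

```go
// Old, location-qualified resource name:
old := fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
// New flat form used throughout the regenerated tests:
flat := fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
_, _ = old, flat
```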
vendor/cloud.google.com/go/bigquery/examples_test.go | 44 (generated, vendored)

@@ -131,6 +131,23 @@ func ExampleClient_Query_parameters() {
 	// TODO: Call Query.Run or Query.Read.
 }
 
+// This example demonstrates how to run a query job on a table
+// with a customer-managed encryption key. The same
+// applies to load and copy jobs as well.
+func ExampleClient_Query_encryptionKey() {
+	ctx := context.Background()
+	client, err := bigquery.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	q := client.Query("select name, num from t1")
+	// TODO: Replace this key with a key you have created in Cloud KMS.
+	keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
+	q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName}
+	// TODO: set other options on the Query.
+	// TODO: Call Query.Run or Query.Read.
+}
+
 func ExampleQuery_Read() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")

@@ -453,6 +470,33 @@ func ExampleTable_Create_initialize() {
 	}
 }
 
+// This example demonstrates how to create a table with
+// a customer-managed encryption key.
+func ExampleTable_Create_encryptionKey() {
+	ctx := context.Background()
+	// Infer table schema from a Go type.
+	schema, err := bigquery.InferSchema(Item{})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	client, err := bigquery.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	t := client.Dataset("my_dataset").Table("new-table")
+
+	// TODO: Replace this key with a key you have created in Cloud KMS.
+	keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
+	if err := t.Create(ctx,
+		&bigquery.TableMetadata{
+			Name:             "My New Table",
+			Schema:           schema,
+			EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName},
+		}); err != nil {
+		// TODO: Handle error.
+	}
+}
+
 func ExampleTable_Delete() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
vendor/cloud.google.com/go/bigquery/extract.go | 2 (generated, vendored)

@@ -99,7 +99,7 @@ func (e *Extractor) Run(ctx context.Context) (*Job, error) {
 
 func (e *Extractor) newJob() *bq.Job {
 	return &bq.Job{
-		JobReference:  e.JobIDConfig.createJobRef(e.c.projectID),
+		JobReference:  e.JobIDConfig.createJobRef(e.c),
 		Configuration: e.ExtractConfig.toBQ(),
 	}
 }
vendor/cloud.google.com/go/bigquery/integration_test.go | 270 (generated, vendored)

@@ -59,6 +59,9 @@ var (
 	tableIDs = testutil.NewUIDSpaceSep("table", '_')
 )
 
+// Note: integration tests cannot be run in parallel, because TestIntegration_Location
+// modifies the client.
+
 func TestMain(m *testing.M) {
 	cleanup := initIntegrationTest()
 	r := m.Run()

@@ -691,14 +694,14 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 	table := newTable(t, schema)
 	defer table.Delete(ctx)
 
-	d := civil.Date{2016, 3, 20}
-	tm := civil.Time{15, 4, 5, 6000}
+	d := civil.Date{Year: 2016, Month: 3, Day: 20}
+	tm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
 	ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC)
-	dtm := civil.DateTime{d, tm}
-	d2 := civil.Date{1994, 5, 15}
-	tm2 := civil.Time{1, 2, 4, 0}
+	dtm := civil.DateTime{Date: d, Time: tm}
+	d2 := civil.Date{Year: 1994, Month: 5, Day: 15}
+	tm2 := civil.Time{Hour: 1, Minute: 2, Second: 4, Nanosecond: 0}
 	ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)
-	dtm2 := civil.DateTime{d2, tm2}
+	dtm2 := civil.DateTime{Date: d2, Time: tm2}
 
 	// Populate the table.
 	upl := table.Uploader()
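The churn in this hunk and the ones that follow is mechanical: composite literals for the civil types now name their fields, which is robust to field reordering and satisfies go vet's composite-literal check. The pattern in isolation:

```go
// Unkeyed (old): depends on the struct's field order.
d := civil.Date{2016, 3, 20}
// Keyed (new): order-independent and vet-clean.
d = civil.Date{Year: 2016, Month: 3, Day: 20}
dtm := civil.DateTime{Date: d, Time: civil.Time{Hour: 15, Minute: 4, Second: 5}}
_ = dtm
```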
@ -797,8 +800,8 @@ func TestIntegration_UploadAndReadNullable(t *testing.T) {
|
||||||
if client == nil {
|
if client == nil {
|
||||||
t.Skip("Integration tests skipped")
|
t.Skip("Integration tests skipped")
|
||||||
}
|
}
|
||||||
ctm := civil.Time{15, 4, 5, 6000}
|
ctm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
|
||||||
cdt := civil.DateTime{testDate, ctm}
|
cdt := civil.DateTime{Date: testDate, Time: ctm}
|
||||||
testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
|
testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
|
||||||
testUploadAndReadNullable(t, testStructNullable{
|
testUploadAndReadNullable(t, testStructNullable{
|
||||||
String: NullString{"x", true},
|
String: NullString{"x", true},
|
||||||
|
@ -943,23 +946,23 @@ func TestIntegration_TableUpdate(t *testing.T) {
|
||||||
// Error cases when updating schema.
|
// Error cases when updating schema.
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
desc string
|
desc string
|
||||||
fields []*FieldSchema
|
fields Schema
|
||||||
}{
|
}{
|
||||||
{"change from optional to required", []*FieldSchema{
|
{"change from optional to required", Schema{
|
||||||
{Name: "name", Type: StringFieldType, Required: true},
|
{Name: "name", Type: StringFieldType, Required: true},
|
||||||
schema3[1],
|
schema3[1],
|
||||||
schema3[2],
|
schema3[2],
|
||||||
schema3[3],
|
schema3[3],
|
||||||
}},
|
}},
|
||||||
{"add a required field", []*FieldSchema{
|
{"add a required field", Schema{
|
||||||
schema3[0], schema3[1], schema3[2], schema3[3],
|
schema3[0], schema3[1], schema3[2], schema3[3],
|
||||||
{Name: "req", Type: StringFieldType, Required: true},
|
{Name: "req", Type: StringFieldType, Required: true},
|
||||||
}},
|
}},
|
||||||
{"remove a field", []*FieldSchema{schema3[0], schema3[1], schema3[2]}},
|
{"remove a field", Schema{schema3[0], schema3[1], schema3[2]}},
|
||||||
{"remove a nested field", []*FieldSchema{
|
{"remove a nested field", Schema{
|
||||||
schema3[0], schema3[1], schema3[2],
|
schema3[0], schema3[1], schema3[2],
|
||||||
{Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
|
{Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
|
||||||
{"remove all nested fields", []*FieldSchema{
|
{"remove all nested fields", Schema{
|
||||||
schema3[0], schema3[1], schema3[2],
|
schema3[0], schema3[1], schema3[2],
|
||||||
{Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}},
|
{Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}},
|
||||||
} {
|
} {
|
||||||
|
@ -1085,9 +1088,9 @@ func TestIntegration_TimeTypes(t *testing.T) {
|
||||||
table := newTable(t, dtSchema)
|
table := newTable(t, dtSchema)
|
||||||
defer table.Delete(ctx)
|
defer table.Delete(ctx)
|
||||||
|
|
||||||
d := civil.Date{2016, 3, 20}
|
d := civil.Date{Year: 2016, Month: 3, Day: 20}
|
||||||
tm := civil.Time{12, 30, 0, 6000}
|
tm := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6000}
|
||||||
dtm := civil.DateTime{d, tm}
|
dtm := civil.DateTime{Date: d, Time: tm}
|
||||||
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
||||||
wantRows := [][]Value{
|
wantRows := [][]Value{
|
||||||
[]Value{d, tm, dtm, ts},
|
[]Value{d, tm, dtm, ts},
|
||||||
|
@ -1121,8 +1124,8 @@ func TestIntegration_StandardQuery(t *testing.T) {
|
||||||
}
|
}
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
d := civil.Date{2016, 3, 20}
|
d := civil.Date{Year: 2016, Month: 3, Day: 20}
|
||||||
tm := civil.Time{15, 04, 05, 0}
|
tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 0}
|
||||||
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
||||||
dtm := ts.Format("2006-01-02 15:04:05")
|
dtm := ts.Format("2006-01-02 15:04:05")
|
||||||
|
|
||||||
|
@ -1147,7 +1150,7 @@ func TestIntegration_StandardQuery(t *testing.T) {
|
||||||
{fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}},
|
{fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}},
|
||||||
{fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}},
|
{fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}},
|
||||||
{fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}},
|
{fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}},
|
||||||
{fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{d, tm}}},
|
{fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{Date: d, Time: tm}}},
|
||||||
{fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}},
|
{fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}},
|
||||||
{fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}},
|
{fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}},
|
||||||
{"SELECT (1, 2)", []Value{ints(1, 2)}},
|
{"SELECT (1, 2)", []Value{ints(1, 2)}},
|
||||||
|
@ -1206,9 +1209,11 @@ func TestIntegration_QueryParameters(t *testing.T) {
|
||||||
}
|
}
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
d := civil.Date{2016, 3, 20}
|
d := civil.Date{Year: 2016, Month: 3, Day: 20}
|
||||||
tm := civil.Time{15, 04, 05, 0}
|
tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 3008}
|
||||||
dtm := civil.DateTime{d, tm}
|
rtm := tm
|
||||||
|
rtm.Nanosecond = 3000 // round to microseconds
|
||||||
|
dtm := civil.DateTime{Date: d, Time: tm}
|
||||||
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
||||||
|
|
||||||
type ss struct {
|
type ss struct {
|
||||||
|
@@ -1226,20 +1231,93 @@ func TestIntegration_QueryParameters(t *testing.T) {
 		query      string
 		parameters []QueryParameter
 		wantRow    []Value
+		wantConfig interface{}
 	}{
-		{"SELECT @val", []QueryParameter{{"val", 1}}, []Value{int64(1)}},
-		{"SELECT @val", []QueryParameter{{"val", 1.3}}, []Value{1.3}},
-		{"SELECT @val", []QueryParameter{{"val", true}}, []Value{true}},
-		{"SELECT @val", []QueryParameter{{"val", "ABC"}}, []Value{"ABC"}},
-		{"SELECT @val", []QueryParameter{{"val", []byte("foo")}}, []Value{[]byte("foo")}},
-		{"SELECT @val", []QueryParameter{{"val", ts}}, []Value{ts}},
-		{"SELECT @val", []QueryParameter{{"val", []time.Time{ts, ts}}}, []Value{[]Value{ts, ts}}},
-		{"SELECT @val", []QueryParameter{{"val", dtm}}, []Value{dtm}},
-		{"SELECT @val", []QueryParameter{{"val", d}}, []Value{d}},
-		{"SELECT @val", []QueryParameter{{"val", tm}}, []Value{tm}},
-		{"SELECT @val", []QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}},
-			[]Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}}},
-		{"SELECT @val.Timestamp, @val.SubStruct.String", []QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}}, []Value{ts, "a"}},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", 1}},
+			[]Value{int64(1)},
+			int64(1),
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", 1.3}},
+			[]Value{1.3},
+			1.3,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", true}},
+			[]Value{true},
+			true,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", "ABC"}},
+			[]Value{"ABC"},
+			"ABC",
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", []byte("foo")}},
+			[]Value{[]byte("foo")},
+			[]byte("foo"),
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", ts}},
+			[]Value{ts},
+			ts,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", []time.Time{ts, ts}}},
+			[]Value{[]Value{ts, ts}},
+			[]interface{}{ts, ts},
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", dtm}},
+			[]Value{civil.DateTime{Date: d, Time: rtm}},
+			civil.DateTime{Date: d, Time: rtm},
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", d}},
+			[]Value{d},
+			d,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", tm}},
+			[]Value{rtm},
+			rtm,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}},
+			[]Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}},
+			map[string]interface{}{
+				"Timestamp":   ts,
+				"StringArray": []interface{}{"a", "b"},
+				"SubStruct":   map[string]interface{}{"String": "c"},
+				"SubStructArray": []interface{}{
+					map[string]interface{}{"String": "d"},
+					map[string]interface{}{"String": "e"},
+				},
+			},
+		},
+		{
+			"SELECT @val.Timestamp, @val.SubStruct.String",
+			[]QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}},
+			[]Value{ts, "a"},
+			map[string]interface{}{
+				"Timestamp":      ts,
+				"SubStruct":      map[string]interface{}{"String": "a"},
+				"StringArray":    nil,
+				"SubStructArray": nil,
+			},
+		},
 	}
 	for _, c := range testCases {
 		q := client.Query(c.query)
@@ -1256,6 +1334,15 @@ func TestIntegration_QueryParameters(t *testing.T) {
 			t.Fatal(err)
 		}
 		checkRead(t, "QueryParameters", it, [][]Value{c.wantRow})
+		config, err := job.Config()
+		if err != nil {
+			t.Fatal(err)
+		}
+		got := config.(*QueryConfig).Parameters[0].Value
+		if !testutil.Equal(got, c.wantConfig) {
+			t.Errorf("param %[1]v (%[1]T): config:\ngot %[2]v (%[2]T)\nwant %[3]v (%[3]T)",
+				c.parameters[0].Value, got, c.wantConfig)
+		}
 	}
 }
@@ -1519,6 +1606,117 @@ func TestIntegration_ListJobs(t *testing.T) {
 	}
 }

+const tokyo = "asia-northeast1"
+
+func TestIntegration_Location(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	client.Location = ""
+	testLocation(t, tokyo)
+	client.Location = tokyo
+	defer func() {
+		client.Location = ""
+	}()
+	testLocation(t, "")
+}
+
+func testLocation(t *testing.T, loc string) {
+	ctx := context.Background()
+	tokyoDataset := client.Dataset("tokyo")
+	err := tokyoDataset.Create(ctx, &DatasetMetadata{Location: loc})
+	if err != nil && !hasStatusCode(err, 409) { // 409 = already exists
+		t.Fatal(err)
+	}
+	md, err := tokyoDataset.Metadata(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if md.Location != tokyo {
+		t.Fatalf("dataset location: got %s, want %s", md.Location, tokyo)
+	}
+	table := tokyoDataset.Table(tableIDs.New())
+	err = table.Create(context.Background(), &TableMetadata{
+		Schema: Schema{
+			{Name: "name", Type: StringFieldType},
+			{Name: "nums", Type: IntegerFieldType},
+		},
+		ExpirationTime: testTableExpiration,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer table.Delete(ctx)
+	loader := table.LoaderFrom(NewReaderSource(strings.NewReader("a,0\nb,1\nc,2\n")))
+	loader.Location = loc
+	job, err := loader.Run(ctx)
+	if err != nil {
+		t.Fatal("loader.Run", err)
+	}
+	if job.Location() != tokyo {
+		t.Fatalf("job location: got %s, want %s", job.Location(), tokyo)
+	}
+	_, err = client.JobFromID(ctx, job.ID())
+	if client.Location == "" && err == nil {
+		t.Error("JobFromID with Tokyo job, no client location: want error, got nil")
+	}
+	if client.Location != "" && err != nil {
+		t.Errorf("JobFromID with Tokyo job, with client location: want nil, got %v", err)
+	}
+	_, err = client.JobFromIDLocation(ctx, job.ID(), "US")
+	if err == nil {
+		t.Error("JobFromIDLocation with US: want error, got nil")
+	}
+	job2, err := client.JobFromIDLocation(ctx, job.ID(), loc)
+	if loc == tokyo && err != nil {
+		t.Errorf("loc=tokyo: %v", err)
+	}
+	if loc == "" && err == nil {
+		t.Error("loc empty: got nil, want error")
+	}
+	if job2 != nil && (job2.ID() != job.ID() || job2.Location() != tokyo) {
+		t.Errorf("got id %s loc %s, want id%s loc %s", job2.ID(), job2.Location(), job.ID(), tokyo)
+	}
+	if err := wait(ctx, job); err != nil {
+		t.Fatal(err)
+	}
+	// Cancel should succeed even if the job is done.
+	if err := job.Cancel(ctx); err != nil {
+		t.Fatal(err)
+	}
+
+	q := client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
+	q.Location = loc
+	iter, err := q.Read(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	wantRows := [][]Value{
+		[]Value{"a", int64(0)},
+		[]Value{"b", int64(1)},
+		[]Value{"c", int64(2)},
+	}
+	checkRead(t, "location", iter, wantRows)
+
+	table2 := tokyoDataset.Table(tableIDs.New())
+	copier := table2.CopierFrom(table)
+	copier.Location = loc
+	if _, err := copier.Run(ctx); err != nil {
+		t.Fatal(err)
+	}
+	bucketName := testutil.ProjID()
+	objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
+	uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
+	defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
+	gr := NewGCSReference(uri)
+	gr.DestinationFormat = CSV
+	e := table.ExtractorTo(gr)
+	e.Location = loc
+	if _, err := e.Run(ctx); err != nil {
+		t.Fatal(err)
+	}
+}
+
 // Creates a new, temporary table with a unique name and the given schema.
 func newTable(t *testing.T, s Schema) *Table {
 	table := dataset.Table(tableIDs.New())
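The location plumbing exercised by the test above is all caller-facing API. A minimal sketch of the intended flow, assuming an existing client, ctx, and dataset (the names are illustrative, not taken from the diff):

    // Run a load job outside US/EU, then look the job up again by location.
    loader := client.Dataset("tokyo").Table("t").LoaderFrom(
        bigquery.NewReaderSource(strings.NewReader("a,0\n")))
    loader.Location = "asia-northeast1" // per-job override

    job, err := loader.Run(ctx)
    if err != nil {
        log.Fatal(err)
    }

    // JobFromID finds this job only if client.Location is set;
    // JobFromIDLocation names the location explicitly.
    j2, err := client.JobFromIDLocation(ctx, job.ID(), "asia-northeast1")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(j2.Location()) // "asia-northeast1"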
50 vendor/cloud.google.com/go/bigquery/job.go generated vendored
@@ -35,6 +35,7 @@ type Job struct {
 	c          *Client
 	projectID  string
 	jobID      string
+	location   string

 	config     *bq.JobConfiguration
 	lastStatus *JobStatus
@@ -43,8 +44,18 @@ type Job struct {
 // JobFromID creates a Job which refers to an existing BigQuery job. The job
 // need not have been created by this package. For example, the job may have
 // been created in the BigQuery console.
+//
+// For jobs whose location is other than "US" or "EU", set Client.Location or use
+// JobFromIDLocation.
 func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
-	bqjob, err := c.getJobInternal(ctx, id, "configuration", "jobReference", "status", "statistics")
+	return c.JobFromIDLocation(ctx, id, c.Location)
+}
+
+// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
+// need not have been created by this package (for example, it may have
+// been created in the BigQuery console), but it must exist in the specified location.
+func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (*Job, error) {
+	bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
 	if err != nil {
 		return nil, err
 	}
@@ -56,6 +67,11 @@ func (j *Job) ID() string {
 	return j.jobID
 }

+// Location returns the job's location.
+func (j *Job) Location() string {
+	return j.location
+}
+
 // State is one of a sequence of states that a Job progresses through as it is processed.
 type State int
@@ -120,14 +136,20 @@ type JobIDConfig struct {
 	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
 	AddJobIDSuffix bool
+
+	// Location is the location for the job.
+	Location string
 }

 // createJobRef creates a JobReference.
-// projectID must be non-empty.
-func (j *JobIDConfig) createJobRef(projectID string) *bq.JobReference {
+func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {
 	// We don't check whether projectID is empty; the server will return an
 	// error when it encounters the resulting JobReference.
-	jr := &bq.JobReference{ProjectId: projectID}
+	loc := j.Location
+	if loc == "" { // Use Client.Location as a default.
+		loc = c.Location
+	}
+	jr := &bq.JobReference{ProjectId: c.projectID, Location: loc}
 	if j.JobID == "" {
 		jr.JobId = randomIDFn()
 	} else if j.AddJobIDSuffix {
@@ -176,7 +198,7 @@ func (s *JobStatus) Err() error {
 // Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
 func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
-	bqjob, err := j.c.getJobInternal(ctx, j.jobID, "status", "statistics")
+	bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
 	if err != nil {
 		return nil, err
 	}
@@ -204,6 +226,7 @@ func (j *Job) Cancel(ctx context.Context) error {
 	// to poll for the job status to see if the cancel completed
 	// successfully". So it would be misleading to return a status.
 	call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
+		Location(j.location).
 		Fields(). // We don't need any of the response data.
 		Context(ctx)
 	setClientHeader(call.Header())
@@ -261,14 +284,17 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, strin
 	}
 	destTable := j.config.Query.DestinationTable
 	// The destination table should only be nil if there was a query error.
-	if destTable == nil {
-		return nil, errors.New("bigquery: query job missing destination table")
+	projectID := j.projectID
+	if destTable != nil && projectID != destTable.ProjectId {
+		return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
 	}
-	projectID := destTable.ProjectId
 	schema, err := waitForQuery(ctx, projectID)
 	if err != nil {
 		return nil, err
 	}
+	if destTable == nil {
+		return nil, errors.New("bigquery: query job missing destination table")
+	}
 	dt := bqToTable(destTable, j.c)
 	it := newRowIterator(ctx, dt, pf)
 	it.Schema = schema
@@ -278,7 +304,7 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, strin
 // waitForQuery waits for the query job to complete and returns its schema.
 func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
 	// Use GetQueryResults only to wait for completion, not to read results.
-	call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Context(ctx).MaxResults(0)
+	call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
 	setClientHeader(call.Header())
 	backoff := gax.Backoff{
 		Initial: 1 * time.Second,
@@ -522,9 +548,12 @@ func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
 	return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
 }

-func (c *Client) getJobInternal(ctx context.Context, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
+func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
 	var job *bq.Job
 	call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
+	if location != "" {
+		call = call.Location(location)
+	}
 	if len(fields) > 0 {
 		call = call.Fields(fields...)
 	}
@@ -547,6 +576,7 @@ func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt
 	j := &Job{
 		projectID: qr.ProjectId,
 		jobID:     qr.JobId,
+		location:  qr.Location,
 		c:         c,
 	}
 	j.setConfig(qc)
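The precedence encoded in createJobRef is easy to miss in the diff: an explicit JobIDConfig.Location wins, otherwise Client.Location is the default. A hedged caller-side sketch (Query embeds JobIDConfig, so Location is promoted; the project ID is hypothetical):

    client, err := bigquery.NewClient(ctx, "my-project")
    if err != nil {
        log.Fatal(err)
    }
    client.Location = "EU" // default location for every job this client creates

    q := client.Query("SELECT 1")
    q.Location = "asia-northeast1" // overrides the client default for this job only
    job, err := q.Run(ctx)         // JobReference.Location is "asia-northeast1"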
55 vendor/cloud.google.com/go/bigquery/job_test.go generated vendored
@@ -23,37 +23,52 @@ import (

 func TestCreateJobRef(t *testing.T) {
 	defer fixRandomID("RANDOM")()
+	cNoLoc := &Client{projectID: "projectID"}
+	cLoc := &Client{projectID: "projectID", Location: "defaultLoc"}
 	for _, test := range []struct {
-		jobID          string
-		addJobIDSuffix bool
-		want           string
+		in     JobIDConfig
+		client *Client
+		want   *bq.JobReference
 	}{
 		{
-			jobID:          "foo",
-			addJobIDSuffix: false,
-			want:           "foo",
+			in:   JobIDConfig{JobID: "foo"},
+			want: &bq.JobReference{JobId: "foo"},
 		},
 		{
-			jobID:          "",
-			addJobIDSuffix: false,
-			want:           "RANDOM",
+			in:   JobIDConfig{},
+			want: &bq.JobReference{JobId: "RANDOM"},
 		},
 		{
-			jobID:          "",
-			addJobIDSuffix: true, // irrelevant
-			want:           "RANDOM",
+			in:   JobIDConfig{AddJobIDSuffix: true},
+			want: &bq.JobReference{JobId: "RANDOM"},
 		},
 		{
-			jobID:          "foo",
-			addJobIDSuffix: true,
-			want:           "foo-RANDOM",
+			in:   JobIDConfig{JobID: "foo", AddJobIDSuffix: true},
+			want: &bq.JobReference{JobId: "foo-RANDOM"},
+		},
+		{
+			in:   JobIDConfig{JobID: "foo", Location: "loc"},
+			want: &bq.JobReference{JobId: "foo", Location: "loc"},
+		},
+		{
+			in:     JobIDConfig{JobID: "foo"},
+			client: cLoc,
+			want:   &bq.JobReference{JobId: "foo", Location: "defaultLoc"},
+		},
+		{
+			in:     JobIDConfig{JobID: "foo", Location: "loc"},
+			client: cLoc,
+			want:   &bq.JobReference{JobId: "foo", Location: "loc"},
 		},
 	} {
-		jc := JobIDConfig{JobID: test.jobID, AddJobIDSuffix: test.addJobIDSuffix}
-		jr := jc.createJobRef("projectID")
-		got := jr.JobId
-		if got != test.want {
-			t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want)
+		client := test.client
+		if client == nil {
+			client = cNoLoc
+		}
+		got := test.in.createJobRef(client)
+		test.want.ProjectId = "projectID"
+		if !testutil.Equal(got, test.want) {
+			t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
 		}
 	}
 }
31 vendor/cloud.google.com/go/bigquery/load.go generated vendored
@@ -42,16 +42,25 @@ type LoadConfig struct {

 	// If non-nil, the destination table is partitioned by time.
 	TimePartitioning *TimePartitioning
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	DestinationEncryptionConfig *EncryptionConfig
+
+	// SchemaUpdateOptions allows the schema of the destination table to be
+	// updated as a side effect of the load job.
+	SchemaUpdateOptions []string
 }

 func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
 	config := &bq.JobConfiguration{
 		Labels: l.Labels,
 		Load: &bq.JobConfigurationLoad{
 			CreateDisposition: string(l.CreateDisposition),
 			WriteDisposition:  string(l.WriteDisposition),
 			DestinationTable:  l.Dst.toBQ(),
 			TimePartitioning:  l.TimePartitioning.toBQ(),
+			DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
+			SchemaUpdateOptions:                l.SchemaUpdateOptions,
 		},
 	}
 	media := l.Src.populateLoadConfig(config.Load)
@@ -60,11 +69,13 @@ func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {

 func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
 	lc := &LoadConfig{
 		Labels:            q.Labels,
 		CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
 		WriteDisposition:  TableWriteDisposition(q.Load.WriteDisposition),
 		Dst:               bqToTable(q.Load.DestinationTable, c),
 		TimePartitioning:  bqToTimePartitioning(q.Load.TimePartitioning),
+		DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
+		SchemaUpdateOptions:         q.Load.SchemaUpdateOptions,
 	}
 	var fc *FileConfig
 	if len(q.Load.SourceUris) == 0 {
@@ -120,7 +131,7 @@ func (l *Loader) Run(ctx context.Context) (*Job, error) {
 func (l *Loader) newJob() (*bq.Job, io.Reader) {
 	config, media := l.LoadConfig.toBQ()
 	return &bq.Job{
-		JobReference:  l.JobIDConfig.createJobRef(l.c.projectID),
+		JobReference:  l.JobIDConfig.createJobRef(l.c),
 		Configuration: config,
 	}, media
 }
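A sketch of what the two new LoadConfig fields enable from the caller's side; the project, bucket, and key names below are placeholders, not values from the diff:

    package main

    import (
        "context"
        "log"

        "cloud.google.com/go/bigquery"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "my-project")
        if err != nil {
            log.Fatal(err)
        }
        gcs := bigquery.NewGCSReference("gs://my-bucket/data.csv")
        loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcs)
        // Protect the destination table with a customer-managed KMS key and
        // allow the load to add new fields to the table's schema.
        loader.DestinationEncryptionConfig = &bigquery.EncryptionConfig{
            KMSKeyName: "projects/p/locations/l/keyRings/r/cryptoKeys/k",
        }
        loader.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
        if _, err := loader.Run(ctx); err != nil {
            log.Fatal(err)
        }
    }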
34 vendor/cloud.google.com/go/bigquery/load_test.go generated vendored
@@ -74,25 +74,38 @@ func TestLoad(t *testing.T) {
 	c := &Client{projectID: "client-project-id"}

 	testCases := []struct {
 		dst      *Table
 		src      LoadSource
 		jobID    string
+		location string
 		config   LoadConfig
 		want     *bq.Job
 	}{
 		{
 			dst:  c.Dataset("dataset-id").Table("table-id"),
 			src:  NewGCSReference("uri"),
 			want: defaultLoadJob(),
 		},
+		{
+			dst:      c.Dataset("dataset-id").Table("table-id"),
+			src:      NewGCSReference("uri"),
+			location: "loc",
+			want: func() *bq.Job {
+				j := defaultLoadJob()
+				j.JobReference.Location = "loc"
+				return j
+			}(),
+		},
 		{
 			dst:   c.Dataset("dataset-id").Table("table-id"),
 			jobID: "ajob",
 			config: LoadConfig{
 				CreateDisposition: CreateNever,
 				WriteDisposition:  WriteTruncate,
 				Labels:            map[string]string{"a": "b"},
 				TimePartitioning:  &TimePartitioning{Expiration: 1234 * time.Millisecond},
+				DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
+				SchemaUpdateOptions:         []string{"ALLOW_FIELD_ADDITION"},
 			},
 			src: NewGCSReference("uri"),
 			want: func() *bq.Job {
@@ -104,10 +117,12 @@ func TestLoad(t *testing.T) {
 					Type:         "DAY",
 					ExpirationMs: 1234,
 				}
+				j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
 				j.JobReference = &bq.JobReference{
 					JobId:     "ajob",
 					ProjectId: "client-project-id",
 				}
+				j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
 				return j
 			}(),
 		},
@@ -224,6 +239,7 @@ func TestLoad(t *testing.T) {
 	for i, tc := range testCases {
 		loader := tc.dst.LoaderFrom(tc.src)
 		loader.JobID = tc.jobID
+		loader.Location = tc.location
 		tc.config.Src = tc.src
 		tc.config.Dst = tc.dst
 		loader.LoadConfig = tc.config
130 vendor/cloud.google.com/go/bigquery/nulls.go generated vendored
@@ -15,9 +15,11 @@
 package bigquery

 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"reflect"
+	"strconv"
 	"time"

 	"cloud.google.com/go/civil"
@@ -134,6 +136,134 @@ func nulljson(valid bool, v interface{}) ([]byte, error) {
 	return json.Marshal(v)
 }

+func (n *NullInt64) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Int64 = 0
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Int64); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+func (n *NullFloat64) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Float64 = 0
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Float64); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+func (n *NullBool) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Bool = false
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Bool); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+func (n *NullString) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.StringVal = ""
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.StringVal); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Timestamp = time.Time{}
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Timestamp); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+func (n *NullDate) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Date = civil.Date{}
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	if err := json.Unmarshal(b, &n.Date); err != nil {
+		return err
+	}
+	n.Valid = true
+	return nil
+}
+
+func (n *NullTime) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.Time = civil.Time{}
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	s, err := strconv.Unquote(string(b))
+	if err != nil {
+		return err
+	}
+
+	t, err := civil.ParseTime(s)
+	if err != nil {
+		return err
+	}
+	n.Time = t
+
+	n.Valid = true
+	return nil
+}
+
+func (n *NullDateTime) UnmarshalJSON(b []byte) error {
+	n.Valid = false
+	n.DateTime = civil.DateTime{}
+	if bytes.Equal(b, jsonNull) {
+		return nil
+	}
+
+	s, err := strconv.Unquote(string(b))
+	if err != nil {
+		return err
+	}
+
+	dt, err := parseCivilDateTime(s)
+	if err != nil {
+		return err
+	}
+	n.DateTime = dt
+
+	n.Valid = true
+	return nil
+}
+
 var (
 	typeOfNullInt64   = reflect.TypeOf(NullInt64{})
 	typeOfNullFloat64 = reflect.TypeOf(NullFloat64{})
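With UnmarshalJSON defined, the nullable types now round-trip through encoding/json: a JSON null leaves Valid false, anything else sets the value and Valid. A small runnable check:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "cloud.google.com/go/bigquery"
    )

    func main() {
        var n bigquery.NullInt64
        if err := json.Unmarshal([]byte(`null`), &n); err != nil {
            log.Fatal(err)
        }
        fmt.Println(n.Valid) // false
        if err := json.Unmarshal([]byte(`3`), &n); err != nil {
            log.Fatal(err)
        }
        fmt.Println(n.Valid, n.Int64) // true 3
    }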
52 vendor/cloud.google.com/go/bigquery/nulls_test.go generated vendored
@@ -16,7 +16,16 @@ package bigquery

 import (
 	"encoding/json"
+	"reflect"
 	"testing"
+
+	"cloud.google.com/go/civil"
+	"cloud.google.com/go/internal/testutil"
+)
+
+var (
+	nullsTestTime     = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000}
+	nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime}
 )

 func TestNullsJSON(t *testing.T) {
@@ -24,23 +33,23 @@ func TestNullsJSON(t *testing.T) {
 		in   interface{}
 		want string
 	}{
-		{NullInt64{Valid: true, Int64: 3}, `3`},
-		{NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
-		{NullBool{Valid: true, Bool: true}, `true`},
-		{NullString{Valid: true, StringVal: "foo"}, `"foo"`},
-		{NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
-		{NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
-		{NullTime{Valid: true, Time: testTime}, `"07:50:22.000000"`},
-		{NullDateTime{Valid: true, DateTime: testDateTime}, `"2016-11-05 07:50:22.000000"`},
+		{&NullInt64{Valid: true, Int64: 3}, `3`},
+		{&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
+		{&NullBool{Valid: true, Bool: true}, `true`},
+		{&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
+		{&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
+		{&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
+		{&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
+		{&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`},

-		{NullInt64{}, `null`},
-		{NullFloat64{}, `null`},
-		{NullBool{}, `null`},
-		{NullString{}, `null`},
-		{NullTimestamp{}, `null`},
-		{NullDate{}, `null`},
-		{NullTime{}, `null`},
-		{NullDateTime{}, `null`},
+		{&NullInt64{}, `null`},
+		{&NullFloat64{}, `null`},
+		{&NullBool{}, `null`},
+		{&NullString{}, `null`},
+		{&NullTimestamp{}, `null`},
+		{&NullDate{}, `null`},
+		{&NullTime{}, `null`},
+		{&NullDateTime{}, `null`},
 	} {
 		bytes, err := json.Marshal(test.in)
 		if err != nil {
@@ -49,5 +58,16 @@ func TestNullsJSON(t *testing.T) {
 		if got, want := string(bytes), test.want; got != want {
 			t.Errorf("%#v: got %s, want %s", test.in, got, want)
 		}
+
+		typ := reflect.Indirect(reflect.ValueOf(test.in)).Type()
+		value := reflect.New(typ).Interface()
+		err = json.Unmarshal(bytes, value)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if !testutil.Equal(value, test.in) {
+			t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in)
+		}
 	}
 }
9 vendor/cloud.google.com/go/bigquery/params.go generated vendored
@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"reflect"
 	"regexp"
-	"strings"
 	"time"

 	"cloud.google.com/go/civil"
@@ -205,6 +204,8 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {

 	case typeOfTime:
 		// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
+		// (If we send nanoseconds, then when we try to read the result we get "query job
+		// missing destination table").
 		res.Value = CivilTimeString(v.Interface().(civil.Time))
 		return res, nil
@@ -306,11 +307,7 @@ func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterTyp
 	case "TIMESTAMP":
 		return time.Parse(timestampFormat, qval.Value)
 	case "DATETIME":
-		parts := strings.Fields(qval.Value)
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("bigquery: bad DATETIME value %q", qval.Value)
-		}
-		return civil.ParseDateTime(parts[0] + "T" + parts[1])
+		return parseCivilDateTime(qval.Value)
 	default:
 		return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
 	}
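The new comment in paramValue is the reason the integration test above rounds its expected TIME to microseconds. The exported helper shows the behavior directly; a minimal sketch, assuming the usual imports:

    // CivilTimeString renders a civil.Time at microsecond precision, which is
    // what paramValue sends for TIME parameters; the 3008ns here becomes 3µs.
    tm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 3008}
    fmt.Println(bigquery.CivilTimeString(tm)) // "15:04:05.000003"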
6 vendor/cloud.google.com/go/bigquery/params_test.go generated vendored
@@ -45,9 +45,9 @@ var scalarTests = []struct {
 	{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
 		"2016-03-20 04:22:09.000005-01:02",
 		timestampParamType},
-	{civil.Date{2016, 3, 20}, "2016-03-20", dateParamType},
-	{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000", timeParamType},
-	{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}},
+	{civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType},
+	{civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType},
+	{civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}},
 		"2016-03-20 04:05:06.789000",
 		dateTimeParamType},
 }
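The switch from positional to keyed literals throughout these tests is defensive: keyed literals keep compiling (and stay correct) if cloud.google.com/go/civil ever gains fields, and they pass vet's composite-literal check for external packages. For example:

    // Fragile: relies on field order in an external package.
    // tm := civil.Time{4, 5, 6, 789000000}

    // Robust and self-documenting.
    tm := civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}
    d := civil.Date{Year: 2016, Month: 3, Day: 20}
    dt := civil.DateTime{Date: d, Time: tm}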
20 vendor/cloud.google.com/go/bigquery/query.go generated vendored
@@ -115,17 +115,21 @@ type QueryConfig struct {
 	// call LastStatus on the returned job to get statistics. Calling Status on a
 	// dry-run job will fail.
 	DryRun bool
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	DestinationEncryptionConfig *EncryptionConfig
 }

 func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
 	qconf := &bq.JobConfigurationQuery{
 		Query:              qc.Q,
 		CreateDisposition:  string(qc.CreateDisposition),
 		WriteDisposition:   string(qc.WriteDisposition),
 		AllowLargeResults:  qc.AllowLargeResults,
 		Priority:           string(qc.Priority),
 		MaximumBytesBilled: qc.MaxBytesBilled,
 		TimePartitioning:   qc.TimePartitioning.toBQ(),
+		DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
 	}
 	if len(qc.TableDefinitions) > 0 {
 		qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
@@ -274,7 +278,7 @@ func (q *Query) newJob() (*bq.Job, error) {
 		return nil, err
 	}
 	return &bq.Job{
-		JobReference:  q.JobIDConfig.createJobRef(q.client.projectID),
+		JobReference:  q.JobIDConfig.createJobRef(q.client),
 		Configuration: config,
 	}, nil
 }
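QueryConfig gains the same CMEK knob as LoadConfig. A hedged sketch of setting it on a query with a destination table, assuming an existing client and ctx (all names and the key are hypothetical):

    q := client.Query("SELECT 17 AS answer")
    q.Dst = client.Dataset("mydataset").Table("mytable")
    // Write the result table with a customer-managed Cloud KMS key.
    q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{
        KMSKeyName: "projects/p/locations/l/keyRings/r/cryptoKeys/k",
    }
    if _, err := q.Run(ctx); err != nil {
        log.Fatal(err)
    }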
12 vendor/cloud.google.com/go/bigquery/query_test.go generated vendored
@@ -120,9 +120,7 @@ func TestQuery(t *testing.T) {
 				g.MaxBadRecords = 1
 				g.Quote = "'"
 				g.SkipLeadingRows = 2
-				g.Schema = Schema([]*FieldSchema{
-					{Name: "name", Type: StringFieldType},
-				})
+				g.Schema = Schema{{Name: "name", Type: StringFieldType}}
 				return g
 			}(),
 		},
@@ -352,6 +350,7 @@ func TestConfiguringQuery(t *testing.T) {
 	query.DefaultProjectID = "def-project-id"
 	query.DefaultDatasetID = "def-dataset-id"
 	query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
+	query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
 	// Note: Other configuration fields are tested in other tests above.
 	// A lot of that can be consolidated once Client.Copy is gone.
@@ -363,9 +362,10 @@ func TestConfiguringQuery(t *testing.T) {
 				ProjectId: "def-project-id",
 				DatasetId: "def-dataset-id",
 			},
 			UseLegacySql:     false,
 			TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
-			ForceSendFields:  []string{"UseLegacySql"},
+			DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
+			ForceSendFields:                    []string{"UseLegacySql"},
 		},
 	},
 	JobReference: &bq.JobReference{
2 vendor/cloud.google.com/go/bigquery/schema_test.go generated vendored
@@ -179,7 +179,7 @@ func TestSchemaConversion(t *testing.T) {
 					Name:     "outer",
 					Required: true,
 					Type:     "RECORD",
-					Schema: []*FieldSchema{
+					Schema: Schema{
 						{
 							Description: "inner field",
 							Name:        "inner",
31 vendor/cloud.google.com/go/bigquery/table.go generated vendored
@@ -76,6 +76,9 @@ type TableMetadata struct {
 	// Information about a table stored outside of BigQuery.
 	ExternalDataConfig *ExternalDataConfig

+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	EncryptionConfig *EncryptionConfig
+
 	// All the fields below are read-only.

 	FullID string // An opaque ID uniquely identifying the table.
@@ -175,6 +178,32 @@ func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
 	}
 }

+// EncryptionConfig configures customer-managed encryption on tables.
+type EncryptionConfig struct {
+	// Describes the Cloud KMS encryption key that will be used to protect
+	// destination BigQuery table. The BigQuery Service Account associated with your
+	// project requires access to this encryption key.
+	KMSKeyName string
+}
+
+func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration {
+	if e == nil {
+		return nil
+	}
+	return &bq.EncryptionConfiguration{
+		KmsKeyName: e.KMSKeyName,
+	}
+}
+
+func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig {
+	if q == nil {
+		return nil
+	}
+	return &EncryptionConfig{
+		KMSKeyName: q.KmsKeyName,
+	}
+}
+
 // StreamingBuffer holds information about the streaming buffer.
 type StreamingBuffer struct {
 	// A lower-bound estimate of the number of bytes currently in the streaming
@@ -265,6 +294,7 @@ func (tm *TableMetadata) toBQ() (*bq.Table, error) {
 		edc := tm.ExternalDataConfig.toBQ()
 		t.ExternalDataConfiguration = &edc
 	}
+	t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
 	if tm.FullID != "" {
 		return nil, errors.New("cannot set FullID on create")
 	}
@@ -320,6 +350,7 @@ func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
 		CreationTime:     unixMillisToTime(t.CreationTime),
 		LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
 		ETag:             t.Etag,
+		EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration),
 	}
 	if t.Schema != nil {
 		md.Schema = bqToSchema(t.Schema)
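EncryptionConfig also hangs off TableMetadata, so tables can be created CMEK-protected directly. A sketch assuming an existing client and ctx; the key name is a placeholder:

    err := client.Dataset("mydataset").Table("protected").Create(ctx, &bigquery.TableMetadata{
        Schema: bigquery.Schema{
            {Name: "name", Type: bigquery.StringFieldType},
        },
        EncryptionConfig: &bigquery.EncryptionConfig{
            KMSKeyName: "projects/p/locations/l/keyRings/r/cryptoKeys/k",
        },
    })
    if err != nil {
        log.Fatal(err)
    }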
6 vendor/cloud.google.com/go/bigquery/table_test.go generated vendored
@@ -53,6 +53,7 @@ func TestBQToTableMetadata(t *testing.T) {
 					Type:  "DAY",
 					Field: "pfield",
 				},
+				EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
 				Type:   "EXTERNAL",
 				View:   &bq.ViewDefinition{Query: "view-query"},
 				Labels: map[string]string{"a": "b"},
@@ -82,7 +83,8 @@ func TestBQToTableMetadata(t *testing.T) {
 					EstimatedRows:   3,
 					OldestEntryTime: aTime,
 				},
-				ETag: "etag",
+				EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
+				ETag:             "etag",
 			},
 		},
 	} {
@@ -115,6 +117,7 @@ func TestTableMetadataToBQ(t *testing.T) {
 				ExpirationTime:     aTime,
 				Labels:             map[string]string{"a": "b"},
 				ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
+				EncryptionConfig:   &EncryptionConfig{KMSKeyName: "keyName"},
 			},
 			&bq.Table{
 				FriendlyName: "n",
@@ -127,6 +130,7 @@ func TestTableMetadataToBQ(t *testing.T) {
 				ExpirationTime:            aTimeMillis,
 				Labels:                    map[string]string{"a": "b"},
 				ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
+				EncryptionConfiguration:   &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
 			},
 		},
 		{
12 vendor/cloud.google.com/go/bigquery/value.go generated vendored
@@ -21,6 +21,7 @@ import (
 	"math"
 	"reflect"
 	"strconv"
+	"strings"
 	"time"

 	"cloud.google.com/go/civil"
@@ -540,6 +541,7 @@ func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
 type StructSaver struct {
 	// Schema determines what fields of the struct are uploaded. It should
 	// match the table's schema.
+	// Schema is optional for StructSavers that are passed to Uploader.Put.
 	Schema Schema

 	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
@@ -707,6 +709,16 @@ func CivilDateTimeString(dt civil.DateTime) string {
 	return dt.Date.String() + " " + CivilTimeString(dt.Time)
 }

+// parseCivilDateTime parses a date-time represented in a BigQuery SQL
+// compatible format and returns a civil.DateTime.
+func parseCivilDateTime(s string) (civil.DateTime, error) {
+	parts := strings.Fields(s)
+	if len(parts) != 2 {
+		return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s)
+	}
+	return civil.ParseDateTime(parts[0] + "T" + parts[1])
+}
+
 // convertRows converts a series of TableRows into a series of Value slices.
 // schema is used to interpret the data from rows; its length must match the
 // length of each row.
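parseCivilDateTime is unexported, but the conversion it performs is simple: BigQuery's SQL DATETIME form uses a space where civil.ParseDateTime expects a 'T'. An equivalent sketch from outside the package, assuming the usual imports:

    s := "2016-03-20 04:05:06.789000" // BigQuery SQL DATETIME form
    dt, err := civil.ParseDateTime(strings.Replace(s, " ", "T", 1))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(dt.Date, dt.Time)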
42 vendor/cloud.google.com/go/bigquery/value_test.go generated vendored
@@ -30,7 +30,7 @@ import (
 )

 func TestConvertBasicValues(t *testing.T) {
-	schema := []*FieldSchema{
+	schema := Schema{
 		{Type: StringFieldType},
 		{Type: IntegerFieldType},
 		{Type: FloatFieldType},
@@ -57,7 +57,7 @@ func TestConvertBasicValues(t *testing.T) {
 }

 func TestConvertTime(t *testing.T) {
-	schema := []*FieldSchema{
+	schema := Schema{
 		{Type: TimestampFieldType},
 		{Type: DateFieldType},
 		{Type: TimeFieldType},
@@ -103,9 +103,7 @@ func TestConvertSmallTimes(t *testing.T) {
 }

 func TestConvertNullValues(t *testing.T) {
-	schema := []*FieldSchema{
-		{Type: StringFieldType},
-	}
+	schema := Schema{{Type: StringFieldType}}
 	row := &bq.TableRow{
 		F: []*bq.TableCell{
 			{V: nil},
@@ -122,7 +120,7 @@ func TestConvertNullValues(t *testing.T) {
 }

 func TestBasicRepetition(t *testing.T) {
-	schema := []*FieldSchema{
+	schema := Schema{
 		{Type: IntegerFieldType, Repeated: true},
 	}
 	row := &bq.TableRow{
@@ -153,7 +151,7 @@ func TestBasicRepetition(t *testing.T) {
 }

 func TestNestedRecordContainingRepetition(t *testing.T) {
-	schema := []*FieldSchema{
+	schema := Schema{
 		{
 			Type: RecordFieldType,
 			Schema: Schema{
@@ -190,7 +188,7 @@ func TestNestedRecordContainingRepetition(t *testing.T) {
 }

 func TestRepeatedRecordContainingRepetition(t *testing.T) {
-	schema := []*FieldSchema{
+	schema := Schema{
 		{
 			Type:     RecordFieldType,
 			Repeated: true,
@@ -264,7 +262,7 @@ func TestRepeatedRecordContainingRepetition(t *testing.T) {
 }

 func TestRepeatedRecordContainingRecord(t *testing.T) {
-	schema := []*FieldSchema{
+	schema := Schema{
 		{
 			Type:     RecordFieldType,
 			Repeated: true,
@@ -399,14 +397,17 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
 	}{
 		{
 			vs: ValuesSaver{
-				Schema: []*FieldSchema{
+				Schema: Schema{
 					{Name: "intField", Type: IntegerFieldType},
 					{Name: "strField", Type: StringFieldType},
 					{Name: "dtField", Type: DateTimeFieldType},
 				},
 				InsertID: "iid",
 				Row: []Value{1, "a",
-					civil.DateTime{civil.Date{1, 2, 3}, civil.Time{4, 5, 6, 7000}}},
+					civil.DateTime{
+						Date: civil.Date{Year: 1, Month: 2, Day: 3},
+						Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 7000}},
+				},
 			},
 			wantInsertID: "iid",
 			wantRow: map[string]Value{"intField": 1, "strField": "a",
@@ -414,12 +415,12 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
 		},
 		{
 			vs: ValuesSaver{
-				Schema: []*FieldSchema{
+				Schema: Schema{
 					{Name: "intField", Type: IntegerFieldType},
 					{
 						Name: "recordField",
 						Type: RecordFieldType,
-						Schema: []*FieldSchema{
+						Schema: Schema{
 							{Name: "nestedInt", Type: IntegerFieldType, Repeated: true},
 						},
 					},
@@ -559,8 +560,8 @@ func TestStructSaver(t *testing.T) {
 		}
 	}

-	ct1 := civil.Time{1, 2, 3, 4000}
-	ct2 := civil.Time{5, 6, 7, 8000}
+	ct1 := civil.Time{Hour: 1, Minute: 2, Second: 3, Nanosecond: 4000}
+	ct2 := civil.Time{Hour: 5, Minute: 6, Second: 7, Nanosecond: 8000}
 	in := T{
 		S: "x",
 		R: []int{1, 2},
@@ -629,7 +630,7 @@ func TestStructSaverErrors(t *testing.T) {
 }

 func TestConvertRows(t *testing.T) {
-	schema := []*FieldSchema{
+	schema := Schema{
 		{Type: StringFieldType},
 		{Type: IntegerFieldType},
 		{Type: FloatFieldType},
@@ -772,9 +773,9 @@ var (
 	}

 	testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC)
-	testDate      = civil.Date{2016, 11, 5}
-	testTime      = civil.Time{7, 50, 22, 8}
-	testDateTime  = civil.DateTime{testDate, testTime}
+	testDate      = civil.Date{Year: 2016, Month: 11, Day: 5}
+	testTime      = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 8}
+	testDateTime  = civil.DateTime{Date: testDate, Time: testTime}

 	testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), int64(8), 3.14, true,
 		testTimestamp, testDate, testTime, testDateTime,
@@ -1069,7 +1070,7 @@ func TestStructLoaderErrors(t *testing.T) {
 		t.Errorf("%T: got nil, want error", bad6)
 	}

-	// sl.set's error is sticky, with even good input.
+	// sl.set's error is sticky, even with good input.
 	err2 := sl.set(&repStruct{}, repSchema)
 	if err2 != err {
 		t.Errorf("%v != %v, expected equal", err2, err)
@@ -1087,6 +1088,7 @@ func TestStructLoaderErrors(t *testing.T) {
 		{Name: "b", Type: BooleanFieldType},
 		{Name: "s", Type: StringFieldType},
 		{Name: "d", Type: DateFieldType},
+		{Name: "r", Type: RecordFieldType, Schema: Schema{{Name: "X", Type: IntegerFieldType}}},
 	}
 	type s struct {
 		I int
55 vendor/cloud.google.com/go/bigtable/admin.go generated vendored
@@ -106,10 +106,17 @@ func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
 	req := &btapb.ListTablesRequest{
 		Parent: prefix,
 	}
-	res, err := ac.tClient.ListTables(ctx, req)
+	var res *btapb.ListTablesResponse
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		res, err = ac.tClient.ListTables(ctx, req)
+		return err
+	}, retryOptions...)
 	if err != nil {
 		return nil, err
 	}
 
 	names := make([]string, 0, len(res.Tables))
 	for _, tbl := range res.Tables {
 		names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
@@ -145,13 +152,13 @@ func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf)
 	ctx = mergeOutgoingMetadata(ctx, ac.md)
 	var req_splits []*btapb.CreateTableRequest_Split
 	for _, split := range conf.SplitKeys {
-		req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)})
+		req_splits = append(req_splits, &btapb.CreateTableRequest_Split{Key: []byte(split)})
 	}
 	var tbl btapb.Table
 	if conf.Families != nil {
 		tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily)
 		for fam, policy := range conf.Families {
-			tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{policy.proto()}
+			tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{GcRule: policy.proto()}
 		}
 	}
 	prefix := ac.instancePrefix()
@@ -174,7 +181,7 @@ func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family str
 		Name: prefix + "/tables/" + table,
 		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
 			Id:  family,
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
 		}},
 	}
 	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
@@ -200,7 +207,7 @@ func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family str
 		Name: prefix + "/tables/" + table,
 		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
 			Id:  family,
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true},
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{Drop: true},
 		}},
 	}
 	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
@@ -227,10 +234,18 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo,
 	req := &btapb.GetTableRequest{
 		Name: prefix + "/tables/" + table,
 	}
-	res, err := ac.tClient.GetTable(ctx, req)
+	var res *btapb.Table
+
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		var err error
+		res, err = ac.tClient.GetTable(ctx, req)
+		return err
+	}, retryOptions...)
 	if err != nil {
 		return nil, err
 	}
 
 	ti := &TableInfo{}
 	for name, fam := range res.ColumnFamilies {
 		ti.Families = append(ti.Families, name)
@@ -249,7 +264,7 @@ func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, po
 		Name: prefix + "/tables/" + table,
 		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
 			Id:  family,
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}},
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}},
 		}},
 	}
 	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
@@ -262,7 +277,7 @@ func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix str
 	prefix := ac.instancePrefix()
 	req := &btapb.DropRowRangeRequest{
 		Name:   prefix + "/tables/" + table,
-		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte(rowKeyPrefix)},
+		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte(rowKeyPrefix)},
 	}
 	_, err := ac.tClient.DropRowRange(ctx, req)
 	return err
@@ -697,7 +712,7 @@ func (iac *InstanceAdminClient) CreateInstanceWithClusters(ctx context.Context,
 // DeleteInstance deletes an instance from the project.
 func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId}
+	req := &btapb.DeleteInstanceRequest{Name: "projects/" + iac.project + "/instances/" + instanceId}
 	_, err := iac.iClient.DeleteInstance(ctx, req)
 	return err
 }
@@ -808,7 +823,7 @@ func (iac *InstanceAdminClient) CreateCluster(ctx context.Context, conf *Cluster
 // production use. It is not subject to any SLA or deprecation policy.
 func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceId, clusterId string) error {
 	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.DeleteClusterRequest{"projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId}
+	req := &btapb.DeleteClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId}
 	_, err := iac.iClient.DeleteCluster(ctx, req)
 	return err
 }
@@ -848,3 +863,23 @@ func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceId string)
 	}
 	return cis, nil
 }
+
+// GetCluster fetches a cluster in an instance
+func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clusterID string) (*ClusterInfo, error) {
+	ctx = mergeOutgoingMetadata(ctx, iac.md)
+	req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters/" + clusterID}
+	c, err := iac.iClient.GetCluster(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	nameParts := strings.Split(c.Name, "/")
+	locParts := strings.Split(c.Location, "/")
+	cis := &ClusterInfo{
+		Name:       nameParts[len(nameParts)-1],
+		Zone:       locParts[len(locParts)-1],
+		ServeNodes: int(c.ServeNodes),
+		State:      c.State.String(),
+	}
+	return cis, nil
+}
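The edits to Tables and TableInfo above share one shape: the bare unary RPC is replaced by a closure handed to gax.Invoke, which re-runs it under this package's retryOptions until it succeeds, the error is non-retryable, or the context expires. Restated outside the diff for readability (res, req, and retryOptions are the identifiers from this file):

    var res *btapb.ListTablesResponse
    err := gax.Invoke(ctx, func(ctx context.Context) error {
        var err error
        // Assign to the outer res so the result survives the closure;
        // gax.Invoke may call this function several times.
        res, err = ac.tClient.ListTables(ctx, req)
        return err
    }, retryOptions...)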
132 vendor/cloud.google.com/go/bigtable/admin_test.go generated vendored
@@ -25,6 +25,7 @@ import (
 	"fmt"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
+	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
 	"strings"
 )
 
@@ -183,6 +184,67 @@ func TestAdminIntegration(t *testing.T) {
 	}
 }
 
+func TestInstanceUpdate(t *testing.T) {
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		t.Fatalf("IntegrationEnv: %v", err)
+	}
+	defer testEnv.Close()
+
+	timeout := 2 * time.Second
+	if testEnv.Config().UseProd {
+		timeout = 5 * time.Minute
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		t.Fatalf("NewAdminClient: %v", err)
+	}
+
+	defer adminClient.Close()
+
+	iAdminClient, err := testEnv.NewInstanceAdminClient()
+	if err != nil {
+		t.Fatalf("NewInstanceAdminClient: %v", err)
+	}
+
+	if iAdminClient == nil {
+		return
+	}
+
+	defer iAdminClient.Close()
+
+	iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
+	if err != nil {
+		t.Errorf("InstanceInfo: %v", err)
+	}
+
+	if iInfo.Name != adminClient.instance {
+		t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
+	}
+
+	if iInfo.DisplayName != adminClient.instance {
+		t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
+	}
+
+	const numNodes = 4
+	// update cluster nodes
+	if err := iAdminClient.UpdateCluster(ctx, adminClient.instance, testEnv.Config().Cluster, int32(numNodes)); err != nil {
+		t.Errorf("UpdateCluster: %v", err)
+	}
+
+	// get cluster after updating
+	cis, err := iAdminClient.GetCluster(ctx, adminClient.instance, testEnv.Config().Cluster)
+	if err != nil {
+		t.Errorf("GetCluster %v", err)
+	}
+	if cis.ServeNodes != int(numNodes) {
+		t.Errorf("ServeNodes returned %d, want %d", cis.ServeNodes, int(numNodes))
+	}
+}
+
 func TestAdminSnapshotIntegration(t *testing.T) {
 	testEnv, err := NewIntegrationEnv()
 	if err != nil {
@@ -299,3 +361,73 @@ func TestAdminSnapshotIntegration(t *testing.T) {
 		t.Fatalf("List after delete len: %d, want: %d", got, want)
 	}
 }
+
+func TestGranularity(t *testing.T) {
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		t.Fatalf("IntegrationEnv: %v", err)
+	}
+	defer testEnv.Close()
+
+	timeout := 2 * time.Second
+	if testEnv.Config().UseProd {
+		timeout = 5 * time.Minute
+	}
+	ctx, _ := context.WithTimeout(context.Background(), timeout)
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		t.Fatalf("NewAdminClient: %v", err)
+	}
+	defer adminClient.Close()
+
+	list := func() []string {
+		tbls, err := adminClient.Tables(ctx)
+		if err != nil {
+			t.Fatalf("Fetching list of tables: %v", err)
+		}
+		sort.Strings(tbls)
+		return tbls
+	}
+	containsAll := func(got, want []string) bool {
+		gotSet := make(map[string]bool)
+
+		for _, s := range got {
+			gotSet[s] = true
+		}
+		for _, s := range want {
+			if !gotSet[s] {
+				return false
+			}
+		}
+		return true
+	}
+
+	defer adminClient.DeleteTable(ctx, "mytable")
+
+	if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+
+	tables := list()
+	if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
+		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
+	}
+
+	// calling ModifyColumnFamilies to check the granularity of table
+	prefix := adminClient.instancePrefix()
+	req := &btapb.ModifyColumnFamiliesRequest{
+		Name: prefix + "/tables/" + "mytable",
+		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
+			Id:  "cf",
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
+		}},
+	}
+	table, err := adminClient.tClient.ModifyColumnFamilies(ctx, req)
+	if err != nil {
+		t.Fatalf("Creating column family: %v", err)
+	}
+	if table.Granularity != btapb.Table_TimestampGranularity(btapb.Table_MILLIS) {
+		t.Errorf("ModifyColumnFamilies returned granularity %#v, want %#v", table.Granularity, btapb.Table_TimestampGranularity(btapb.Table_MILLIS))
+	}
+}
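TestInstanceUpdate above exercises the two instance-admin calls this commit touches. Stripped of test plumbing, the call sequence looks like this (instance and cluster names are placeholders; iac is an *InstanceAdminClient):

    // Resize a cluster, then read its state back via the new GetCluster.
    if err := iac.UpdateCluster(ctx, "my-instance", "my-cluster", int32(4)); err != nil {
        // handle error
    }
    ci, err := iac.GetCluster(ctx, "my-instance", "my-cluster")
    if err != nil {
        // handle error
    }
    fmt.Println(ci.ServeNodes) // 4 once the resize has been applied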
99 vendor/cloud.google.com/go/bigtable/bigtable.go generated vendored
@@ -33,6 +33,7 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
 )
 
 const prodAddr = "bigtable.googleapis.com:443"
@@ -83,11 +84,13 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C
 	if err != nil {
 		return nil, fmt.Errorf("dialing: %v", err)
 	}
+
 	return &Client{
 		conn:     conn,
 		client:   btpb.NewBigtableClient(conn),
 		project:  project,
 		instance: instance,
+		appProfile: config.AppProfile,
 	}, nil
 }
 
@@ -147,7 +150,11 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
 	ctx = mergeOutgoingMetadata(ctx, t.md)
 
 	var prevRowKey string
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	var err error
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows")
+	defer func() { traceEndSpan(ctx, err) }()
+	attrMap := make(map[string]interface{})
+	err = gax.Invoke(ctx, func(ctx context.Context) error {
 		if !arg.valid() {
 			// Empty row set, no need to make an API call.
 			// NOTE: we must return early if arg == RowList{} because reading
@@ -165,6 +172,7 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
 		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
 		defer cancel()
 
+		startTime := time.Now()
 		stream, err := t.c.client.ReadRows(ctx, req)
 		if err != nil {
 			return err
@@ -178,6 +186,10 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
 			if err != nil {
 				// Reset arg for next Invoke call.
 				arg = arg.retainRowsAfter(prevRowKey)
+				attrMap["rowKey"] = prevRowKey
+				attrMap["error"] = err.Error()
+				attrMap["time_secs"] = time.Since(startTime).Seconds()
+				tracePrintf(ctx, attrMap, "Retry details in ReadRows")
 				return err
 			}
 
@@ -317,10 +329,10 @@ func (r RowRange) String() string {
 
 func (r RowRange) proto() *btpb.RowSet {
 	rr := &btpb.RowRange{
-		StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)},
+		StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)},
 	}
 	if !r.Unbounded() {
-		rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)}
+		rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)}
 	}
 	return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
 }
@@ -462,6 +474,9 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl
 		}
 	}
 
+	var err error
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/Apply")
+	defer func() { traceEndSpan(ctx, err) }()
 	var callOptions []gax.CallOption
 	if m.cond == nil {
 		req := &btpb.MutateRowRequest{
@@ -507,7 +522,7 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl
 		callOptions = retryOptions
 	}
 	var cmRes *btpb.CheckAndMutateRowResponse
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	err = gax.Invoke(ctx, func(ctx context.Context) error {
 		var err error
 		cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
 		return err
@@ -564,7 +579,7 @@ func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
 // The timestamp will be truncated to millisecond granularity.
 // A timestamp of ServerTime means to use the server timestamp.
 func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
 		FamilyName:      family,
 		ColumnQualifier: []byte(column),
 		TimestampMicros: int64(ts.TruncateToMilliseconds()),
@@ -574,7 +589,7 @@ func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
 
 // DeleteCellsInColumn will delete all the cells whose columns are family:column.
 func (m *Mutation) DeleteCellsInColumn(family, column string) {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
+	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
 		FamilyName:      family,
 		ColumnQualifier: []byte(column),
 	}}})
@@ -585,7 +600,7 @@ func (m *Mutation) DeleteCellsInColumn(family, column string) {
 // If end is zero, it will be interpreted as infinity.
 // The timestamps will be truncated to millisecond granularity.
 func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
+	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
 		FamilyName:      family,
 		ColumnQualifier: []byte(column),
 		TimeRange: &btpb.TimestampRange{
@@ -597,14 +612,14 @@ func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timest
 
 // DeleteCellsInFamily will delete all the cells whose columns are family:*.
 func (m *Mutation) DeleteCellsInFamily(family string) {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
+	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{DeleteFromFamily: &btpb.Mutation_DeleteFromFamily{
 		FamilyName: family,
 	}}})
 }
 
 // DeleteRow deletes the entire row.
 func (m *Mutation) DeleteRow() {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
+	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}})
 }
 
 // entryErr is a container that combines an entry with the error that was returned for it.
@@ -642,7 +657,13 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio
 	// entries will be reduced after each invocation to just what needs to be retried.
 	entries := make([]*entryErr, len(rowKeys))
 	copy(entries, origEntries)
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
+	var err error
+	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk")
+	defer func() { traceEndSpan(ctx, err) }()
+	attrMap := make(map[string]interface{})
+	err = gax.Invoke(ctx, func(ctx context.Context) error {
+		attrMap["rowCount"] = len(entries)
+		tracePrintf(ctx, attrMap, "Row count in ApplyBulk")
 		err := t.doApplyBulk(ctx, entries, opts...)
 		if err != nil {
 			// We want to retry the entire request with the current entries
@@ -652,11 +673,10 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio
 		if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
 			// We have at least one mutation that needs to be retried.
 			// Return an arbitrary error that is retryable according to callOptions.
-			return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
+			return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
 		}
 		return nil
 	}, retryOptions...)
-
 	if err != nil {
 		return nil, err
 	}
@@ -721,11 +741,11 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...
 	}
 
 	for i, entry := range res.Entries {
-		status := entry.Status
-		if status.Code == int32(codes.OK) {
+		s := entry.Status
+		if s.Code == int32(codes.OK) {
 			entryErrs[i].Err = nil
 		} else {
-			entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
+			entryErrs[i].Err = status.Errorf(codes.Code(s.Code), s.Message)
 		}
 	}
 	after(res)
@@ -803,7 +823,7 @@ func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
 	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
 		FamilyName:      family,
 		ColumnQualifier: []byte(column),
-		Rule:            &btpb.ReadModifyWriteRule_AppendValue{v},
+		Rule:            &btpb.ReadModifyWriteRule_AppendValue{AppendValue: v},
 	})
 }
 
@@ -815,7 +835,7 @@ func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
 	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
 		FamilyName:      family,
 		ColumnQualifier: []byte(column),
-		Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{delta},
+		Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: delta},
 	})
 }
 
@@ -825,3 +845,40 @@ func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context
 	mdCopy, _ := metadata.FromOutgoingContext(ctx)
 	return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
 }
+
+func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) {
+	ctx = mergeOutgoingMetadata(ctx, t.md)
+	var sampledRowKeys []string
+	err := gax.Invoke(ctx, func(ctx context.Context) error {
+		sampledRowKeys = nil
+		req := &btpb.SampleRowKeysRequest{
+			TableName:    t.c.fullTableName(t.table),
+			AppProfileId: t.c.appProfile,
+		}
+		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
+		defer cancel()
+
+		stream, err := t.c.client.SampleRowKeys(ctx, req)
+		if err != nil {
+			return err
+		}
+		for {
+			res, err := stream.Recv()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return err
+			}
+
+			key := string(res.RowKey)
+			if key == "" {
+				continue
+			}
+
+			sampledRowKeys = append(sampledRowKeys, key)
+		}
+		return nil
+	}, retryOptions...)
+	return sampledRowKeys, err
+}
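The new Table.SampleRowKeys above streams back the server's approximate split points for a table, retrying the whole stream via gax.Invoke on transient failure. A short usage sketch (tbl is a *bigtable.Table obtained from Client.Open):

    keys, err := tbl.SampleRowKeys(ctx)
    if err != nil {
        log.Fatalf("SampleRowKeys: %v", err)
    }
    for _, k := range keys {
        fmt.Println(k) // boundary row keys, useful for sharding large scans
    }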
107 vendor/cloud.google.com/go/bigtable/bigtable_test.go generated vendored
@@ -154,10 +154,11 @@ func TestClientIntegration(t *testing.T) {
 	}
 	checkpoint("inserted initial data")
 
-	if err := adminClient.WaitForReplication(ctx, table); err != nil {
-		t.Errorf("Waiting for replication for table %q: %v", table, err)
-	}
-	checkpoint("waited for replication")
+	// TODO(igorbernstein): re-enable this when ready
+	//if err := adminClient.WaitForReplication(ctx, table); err != nil {
+	//	t.Errorf("Waiting for replication for table %q: %v", table, err)
+	//}
+	//checkpoint("waited for replication")
 
 	// Do a conditional mutation with a complex filter.
 	mutTrue := NewMutation()
@@ -1062,3 +1063,101 @@ func clearTimestamps(r Row) {
 		}
 	}
 }
+
+func TestSampleRowKeys(t *testing.T) {
+	start := time.Now()
+	lastCheckpoint := start
+	checkpoint := func(s string) {
+		n := time.Now()
+		t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
+		lastCheckpoint = n
+	}
+	ctx := context.Background()
+	client, adminClient, table, err := doSetup(ctx)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+	defer client.Close()
+	defer adminClient.Close()
+	tbl := client.Open(table)
+	// Delete the table at the end of the test.
+	// Do this even before creating the table so that if this is running
+	// against production and CreateTable fails there's a chance of cleaning it up.
+	defer adminClient.DeleteTable(ctx, table)
+
+	// Insert some data.
+	initialData := map[string][]string{
+		"wmckinley11":   {"tjefferson11"},
+		"gwashington77": {"jadams77"},
+		"tjefferson0":   {"gwashington0", "jadams0"},
+	}
+
+	for row, ss := range initialData {
+		mut := NewMutation()
+		for _, name := range ss {
+			mut.Set("follows", name, 0, []byte("1"))
+		}
+		if err := tbl.Apply(ctx, row, mut); err != nil {
+			t.Errorf("Mutating row %q: %v", row, err)
+		}
+	}
+	checkpoint("inserted initial data")
+	sampleKeys, err := tbl.SampleRowKeys(context.Background())
+	if err != nil {
+		t.Errorf("%s: %v", "SampleRowKeys:", err)
+	}
+	if len(sampleKeys) == 0 {
+		t.Error("SampleRowKeys length 0")
+	}
+	checkpoint("tested SampleRowKeys.")
+}
+
+func doSetup(ctx context.Context) (*Client, *AdminClient, string, error) {
+	start := time.Now()
+	lastCheckpoint := start
+	checkpoint := func(s string) {
+		n := time.Now()
+		fmt.Printf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
+		lastCheckpoint = n
+	}
+
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("IntegrationEnv: %v", err)
+	}
+
+	var timeout time.Duration
+	if testEnv.Config().UseProd {
+		timeout = 10 * time.Minute
+		fmt.Printf("Running test against production")
+	} else {
+		timeout = 1 * time.Minute
+		fmt.Printf("bttest.Server running on %s", testEnv.Config().AdminEndpoint)
+	}
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+
+	client, err := testEnv.NewClient()
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("Client: %v", err)
+	}
+	checkpoint("dialed Client")
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		return nil, nil, "", fmt.Errorf("AdminClient: %v", err)
+	}
+	checkpoint("dialed AdminClient")
+
+	table := testEnv.Config().Table
+	if err := adminClient.CreateTable(ctx, table); err != nil {
+		return nil, nil, "", fmt.Errorf("Creating table: %v", err)
+	}
+	checkpoint("created table")
+	if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil {
+		return nil, nil, "", fmt.Errorf("Creating column family: %v", err)
+	}
+	checkpoint(`created "follows" column family`)
+
+	return client, adminClient, table, nil
+}
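TestSampleRowKeys seeds its table through the ordinary mutation path before sampling; the core of that write, pulled out of the loop (family, row key, and value follow the test's fixtures):

    mut := bigtable.NewMutation()
    mut.Set("follows", "tjefferson11", 0, []byte("1")) // family, column, timestamp, value
    if err := tbl.Apply(ctx, "wmckinley11", mut); err != nil {
        // handle error
    }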
15 vendor/cloud.google.com/go/bigtable/bttest/inmem.go generated vendored
@@ -121,7 +121,7 @@ func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest)
 	s.mu.Lock()
 	if _, ok := s.tables[tbl]; ok {
 		s.mu.Unlock()
-		return nil, grpc.Errorf(codes.AlreadyExists, "table %q already exists", tbl)
+		return nil, status.Errorf(codes.AlreadyExists, "table %q already exists", tbl)
 	}
 	s.tables[tbl] = newTable(req)
 	s.mu.Unlock()
@@ -151,7 +151,7 @@ func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*bta
 	tblIns, ok := s.tables[tbl]
 	s.mu.Unlock()
 	if !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", tbl)
+		return nil, status.Errorf(codes.NotFound, "table %q not found", tbl)
 	}
 
 	return &btapb.Table{
@@ -177,7 +177,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
 	tbl, ok := s.tables[req.Name]
 	s.mu.Unlock()
 	if !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
+		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
 	}
 
 	tbl.mu.Lock()
@@ -186,7 +186,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
 	for _, mod := range req.Modifications {
 		if create := mod.GetCreate(); create != nil {
 			if _, ok := tbl.families[mod.Id]; ok {
-				return nil, grpc.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id)
+				return nil, status.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id)
 			}
 			newcf := &columnFamily{
 				name: req.Name + "/columnFamilies/" + mod.Id,
@@ -218,6 +218,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
 	return &btapb.Table{
 		Name:           tblName,
 		ColumnFamilies: toColumnFamilies(tbl.families),
+		Granularity:    btapb.Table_TimestampGranularity(btapb.Table_MILLIS),
 	}, nil
 }
 
@@ -415,7 +416,7 @@ func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (
 	// We can't have a cell with just COMMIT set, which would imply a new empty cell.
 	// So modify the last cell to have the COMMIT flag set.
 	if len(rrr.Chunks) > 0 {
-		rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{true}
+		rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}
 	}
 
 	return true, stream.Send(rrr)
@@ -429,6 +430,10 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
 	}
 	// Handle filters that apply beyond just including/excluding cells.
 	switch f := f.Filter.(type) {
+	case *btpb.RowFilter_BlockAllFilter:
+		return !f.BlockAllFilter
+	case *btpb.RowFilter_PassAllFilter:
+		return f.PassAllFilter
 	case *btpb.RowFilter_Chain_:
 		for _, sub := range f.Chain.Filters {
 			if !filterRow(sub, r) {
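Every error constructor in this file moves from the deprecated grpc.Errorf to status.Errorf. Both yield a gRPC status error, but the status package is the supported API and round-trips cleanly through status.FromError. A small sketch of constructing and inspecting one:

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func example() {
        err := status.Errorf(codes.NotFound, "table %q not found", "t")
        if s, ok := status.FromError(err); ok {
            fmt.Println(s.Code(), s.Message()) // NotFound table "t" not found
        }
    }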
161 vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go generated vendored
@@ -46,7 +46,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
 		Name: name,
 		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
 			Id:  "cf",
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
 		}},
 	}
 	_, err := s.ModifyColumnFamilies(ctx, req)
@@ -57,8 +57,8 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
 		Name: name,
 		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
 			Id: "cf",
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{
-				GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}},
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{
+				GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}},
 			}},
 		}},
 	}
@@ -70,7 +70,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
 	var ts int64
 	ms := func() []*btpb.Mutation {
 		return []*btpb.Mutation{{
-			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
 				FamilyName:      "cf",
 				ColumnQualifier: []byte(`col`),
 				TimestampMicros: atomic.AddInt64(&ts, 1000),
@@ -85,7 +85,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
 		Rules: []*btpb.ReadModifyWriteRule{{
 			FamilyName:      "cf",
 			ColumnQualifier: []byte("col"),
-			Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{1},
+			Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1},
 		}},
 	}
 }
@@ -139,8 +139,8 @@ func TestCreateTableWithFamily(t *testing.T) {
 	ctx := context.Background()
 	newTbl := btapb.Table{
 		ColumnFamilies: map[string]*btapb.ColumnFamily{
-			"cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{123}}},
-			"cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{456}}},
+			"cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 123}}},
+			"cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 456}}},
 		},
 	}
 	cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@@ -184,7 +184,7 @@ func TestSampleRowKeys(t *testing.T) {
 	ctx := context.Background()
 	newTbl := btapb.Table{
 		ColumnFamilies: map[string]*btapb.ColumnFamily{
-			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
+			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
 		},
 	}
 	tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@@ -200,7 +200,7 @@ func TestSampleRowKeys(t *testing.T) {
 		TableName: tbl.Name,
 		RowKey:    []byte("row-" + strconv.Itoa(i)),
 		Mutations: []*btpb.Mutation{{
-			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
 				FamilyName:      "cf",
 				ColumnQualifier: []byte("col"),
 				TimestampMicros: 0,
@@ -235,7 +235,7 @@ func TestDropRowRange(t *testing.T) {
 	ctx := context.Background()
 	newTbl := btapb.Table{
 		ColumnFamilies: map[string]*btapb.ColumnFamily{
-			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
+			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
 		},
 	}
 	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@@ -255,7 +255,7 @@ func TestDropRowRange(t *testing.T) {
 		TableName: tblInfo.Name,
 		RowKey:    []byte(prefix + strconv.Itoa(i)),
 		Mutations: []*btpb.Mutation{{
-			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
 				FamilyName:      "cf",
 				ColumnQualifier: []byte("col"),
 				TimestampMicros: 0,
@@ -274,7 +274,7 @@ func TestDropRowRange(t *testing.T) {
 	tblSize := tbl.rows.Len()
 	req := &btapb.DropRowRangeRequest{
 		Name:   tblInfo.Name,
-		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")},
+		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("AAA")},
 	}
 	if _, err = s.DropRowRange(ctx, req); err != nil {
 		t.Fatalf("Dropping first range: %v", err)
@@ -286,7 +286,7 @@ func TestDropRowRange(t *testing.T) {
 
 	req = &btapb.DropRowRangeRequest{
 		Name:   tblInfo.Name,
-		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("DDD")},
+		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("DDD")},
 	}
 	if _, err = s.DropRowRange(ctx, req); err != nil {
 		t.Fatalf("Dropping second range: %v", err)
@@ -298,7 +298,7 @@ func TestDropRowRange(t *testing.T) {
 
 	req = &btapb.DropRowRangeRequest{
 		Name:   tblInfo.Name,
-		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("XXX")},
+		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("XXX")},
 	}
 	if _, err = s.DropRowRange(ctx, req); err != nil {
 		t.Fatalf("Dropping invalid range: %v", err)
@@ -310,7 +310,7 @@ func TestDropRowRange(t *testing.T) {
 
 	req = &btapb.DropRowRangeRequest{
 		Name:   tblInfo.Name,
-		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true},
+		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true},
 	}
 	if _, err = s.DropRowRange(ctx, req); err != nil {
 		t.Fatalf("Dropping all data: %v", err)
@@ -326,7 +326,7 @@ func TestDropRowRange(t *testing.T) {
 
 	req = &btapb.DropRowRangeRequest{
 		Name:   tblInfo.Name,
-		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true},
+		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true},
 	}
 	if _, err = s.DropRowRange(ctx, req); err != nil {
 		t.Fatalf("Dropping all data: %v", err)
@@ -344,7 +344,7 @@ func TestDropRowRange(t *testing.T) {
 
 	req = &btapb.DropRowRangeRequest{
 		Name:   tblInfo.Name,
-		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("BBB")},
+		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("BBB")},
 	}
 	if _, err = s.DropRowRange(ctx, req); err != nil {
 		t.Fatalf("Dropping range: %v", err)
@@ -373,7 +373,7 @@ func TestReadRows(t *testing.T) {
 	}
 	newTbl := btapb.Table{
 		ColumnFamilies: map[string]*btapb.ColumnFamily{
-			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
+			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
 		},
 	}
 	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@@ -384,7 +384,7 @@ func TestReadRows(t *testing.T) {
 		TableName: tblInfo.Name,
 		RowKey:    []byte("row"),
 		Mutations: []*btpb.Mutation{{
-			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
 				FamilyName:      "cf0",
 				ColumnQualifier: []byte("col"),
 				TimestampMicros: 1000,
@@ -398,11 +398,11 @@ func TestReadRows(t *testing.T) {
 
 	for _, rowset := range []*btpb.RowSet{
 		{RowKeys: [][]byte{[]byte("row")}},
-		{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{[]byte("")}}}},
-		{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{[]byte("r")}}}},
+		{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")}}}},
+		{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("r")}}}},
 		{RowRanges: []*btpb.RowRange{{
-			StartKey: &btpb.RowRange_StartKeyClosed{[]byte("")},
-			EndKey:   &btpb.RowRange_EndKeyOpen{[]byte("s")},
+			StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")},
+			EndKey:   &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte("s")},
 		}}},
 	} {
 		mock := &MockReadRowsServer{}
@@ -423,7 +423,7 @@ func TestReadRowsOrder(t *testing.T) {
 	ctx := context.Background()
 	newTbl := btapb.Table{
 		ColumnFamilies: map[string]*btapb.ColumnFamily{
-			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
+			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
 		},
 	}
 	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@@ -436,7 +436,7 @@ func TestReadRowsOrder(t *testing.T) {
 		Name: tblInfo.Name,
 		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
 			Id:  "cf" + strconv.Itoa(i),
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
+			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
 		}},
 	}
 }
@@ -454,7 +454,7 @@ func TestReadRowsOrder(t *testing.T) {
 		TableName: tblInfo.Name,
 		RowKey:    []byte("row"),
 		Mutations: []*btpb.Mutation{{
-			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
 				FamilyName:      "cf" + strconv.Itoa(fc),
 				ColumnQualifier: []byte("col" + strconv.Itoa(cc)),
 				TimestampMicros: int64((tc + 1) * 1000),
@@ -512,16 +512,17 @@ func TestReadRowsOrder(t *testing.T) {
 
 	// Read with interleave filter
 	inter := &btpb.RowFilter_Interleave{}
-	fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}}
-	cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}}
+	fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: "1"}}
+	cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte("2")}}
 	inter.Filters = append(inter.Filters, fnr, cqr)
 	req = &btpb.ReadRowsRequest{
 		TableName: tblInfo.Name,
 		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
 		Filter: &btpb.RowFilter{
-			Filter: &btpb.RowFilter_Interleave_{inter},
+			Filter: &btpb.RowFilter_Interleave_{Interleave: inter},
 		},
 	}
+
 	mock = &MockReadRowsServer{}
 	if err = s.ReadRows(req, mock); err != nil {
 		t.Errorf("ReadRows error: %v", err)
@@ -542,7 +543,7 @@ func TestReadRowsOrder(t *testing.T) {
 		Rules: []*btpb.ReadModifyWriteRule{{
 			FamilyName:      "cf3",
 			ColumnQualifier: []byte("col" + strconv.Itoa(i)),
-			Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{1},
+			Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1},
 		}},
 	}
 }
@@ -573,7 +574,7 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
 	ctx := context.Background()
 	newTbl := btapb.Table{
 		ColumnFamilies: map[string]*btapb.ColumnFamily{
-			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
+			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
 		},
 	}
 	tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@@ -587,7 +588,7 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
 		TableName: tbl.Name,
 		RowKey:    []byte("row-present"),
 		Mutations: []*btpb.Mutation{{
-			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
 				FamilyName:      "cf",
 				ColumnQualifier: []byte("col"),
 				TimestampMicros: 0,
@@ -619,3 +620,99 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
 		t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want)
 	}
 }
+
+// helper function to populate table data
+func populateTable(ctx context.Context, s *server) (*btapb.Table, error) {
+	newTbl := btapb.Table{
+		ColumnFamilies: map[string]*btapb.ColumnFamily{
+			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
+		},
+	}
+	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
+	if err != nil {
+		return nil, err
+	}
+	count := 3
+	mcf := func(i int) *btapb.ModifyColumnFamiliesRequest {
+		return &btapb.ModifyColumnFamiliesRequest{
+			Name: tblInfo.Name,
+			Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
+				Id:  "cf" + strconv.Itoa(i),
+				Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
+			}},
+		}
+	}
+	for i := 1; i <= count; i++ {
+		_, err = s.ModifyColumnFamilies(ctx, mcf(i))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Populate the table
+	for fc := 0; fc < count; fc++ {
+		for cc := count; cc > 0; cc-- {
+			for tc := 0; tc < count; tc++ {
+				req := &btpb.MutateRowRequest{
+					TableName: tblInfo.Name,
+					RowKey:    []byte("row"),
+					Mutations: []*btpb.Mutation{{
+						Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
+							FamilyName:      "cf" + strconv.Itoa(fc),
+							ColumnQualifier: []byte("col" + strconv.Itoa(cc)),
+							TimestampMicros: int64((tc + 1) * 1000),
+							Value:           []byte{},
+						}},
+					}},
+				}
+				if _, err := s.MutateRow(ctx, req); err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+
+	return tblInfo, nil
+}
+
+func TestFilters(t *testing.T) {
+	tests := []struct {
+		in  *btpb.RowFilter
+		out int
+	}{
+		{in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{true}}, out: 0},
+		{in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{false}}, out: 1},
+		{in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{true}}, out: 1},
+		{in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{false}}, out: 0},
+	}
+
+	ctx := context.Background()
+
+	s := &server{
+		tables: make(map[string]*table),
+	}
+
+	tblInfo, err := populateTable(ctx, s)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req := &btpb.ReadRowsRequest{
+		TableName: tblInfo.Name,
+		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
+	}
+
+	for _, tc := range tests {
+		req.Filter = tc.in
+
+		mock := &MockReadRowsServer{}
+		if err = s.ReadRows(req, mock); err != nil {
+			t.Errorf("ReadRows error: %v", err)
+			continue
+		}
+
+		if len(mock.responses) != tc.out {
+			t.Errorf("Response count: got %d, want %d", len(mock.responses), tc.out)
+			continue
+		}
+	}
+}
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go (generated, vendored; 55 changed lines)

@@ -1074,12 +1074,12 @@ func doSet(ctx context.Context, args ...string) {
 
 func doSetGCPolicy(ctx context.Context, args ...string) {
 	if len(args) < 3 {
-		log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )")
+		log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> | maxage=<d> (and|or) maxversions=<n> )")
 	}
 	table := args[0]
 	fam := args[1]
 
-	pol, err := parseGCPolicy(args[2])
+	pol, err := parseGCPolicy(strings.Join(args[2:], " "))
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -1101,24 +1101,55 @@ func doWaitForReplicaiton(ctx context.Context, args ...string) {
 }
 
 func parseGCPolicy(policyStr string) (bigtable.GCPolicy, error) {
-	var pol bigtable.GCPolicy
-	switch p := policyStr; {
-	case strings.HasPrefix(p, "maxage="):
-		d, err := parseDuration(p[7:])
+	words := strings.Fields(policyStr)
+	switch len(words) {
+	case 1:
+		return parseSinglePolicy(words[0])
+	case 3:
+		p1, err := parseSinglePolicy(words[0])
 		if err != nil {
 			return nil, err
 		}
-		pol = bigtable.MaxAgePolicy(d)
-	case strings.HasPrefix(p, "maxversions="):
-		n, err := strconv.ParseUint(p[12:], 10, 16)
-		if err != nil {
-			return nil, err
-		}
-		pol = bigtable.MaxVersionsPolicy(int(n))
+		p2, err := parseSinglePolicy(words[2])
+		if err != nil {
+			return nil, err
+		}
+		switch words[1] {
+		case "and":
+			return bigtable.IntersectionPolicy(p1, p2), nil
+		case "or":
+			return bigtable.UnionPolicy(p1, p2), nil
+		default:
+			return nil, fmt.Errorf("Expected 'and' or 'or', saw %q", words[1])
+		}
 	default:
-		return nil, fmt.Errorf("Bad GC policy %q", p)
+		return nil, fmt.Errorf("Expected '1' or '3' parameter count, saw %d", len(words))
 	}
-	return pol, nil
+	return nil, nil
+}
+
+func parseSinglePolicy(s string) (bigtable.GCPolicy, error) {
+	words := strings.Split(s, "=")
+	if len(words) != 2 {
+		return nil, fmt.Errorf("Expected 'name=value', got %q", words)
+	}
+	switch words[0] {
+	case "maxage":
+		d, err := parseDuration(words[1])
+		if err != nil {
+			return nil, err
+		}
+		return bigtable.MaxAgePolicy(d), nil
+	case "maxversions":
+		n, err := strconv.ParseUint(words[1], 10, 16)
+		if err != nil {
+			return nil, err
+		}
+		return bigtable.MaxVersionsPolicy(int(n)), nil
+	default:
+		return nil, fmt.Errorf("Expected 'maxage' or 'maxversions', got %q", words[1])
+	}
+	return nil, nil
 }
 
 func parseStorageType(storageTypeStr string) (bigtable.StorageType, error) {
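The expanded grammar accepted by parseGCPolicy is either a single clause or two clauses joined by "and"/"or". Illustrative invocations based on the usage string above (the table and family names are placeholders, not from this commit):

	cbt setgcpolicy mytable cf1 maxage=24h
	cbt setgcpolicy mytable cf1 maxversions=5
	cbt setgcpolicy mytable cf1 maxage=24h and maxversions=5

The last form parses to bigtable.IntersectionPolicy(bigtable.MaxAgePolicy(24*time.Hour), bigtable.MaxVersionsPolicy(5)); with "or" it becomes the corresponding UnionPolicy.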
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go (generated, vendored; 54 changed lines)

@@ -17,6 +17,9 @@ package main
 import (
 	"testing"
 	"time"
+
+	"cloud.google.com/go/bigtable"
+	"github.com/google/go-cmp/cmp"
 )
 
 func TestParseDuration(t *testing.T) {
@@ -57,3 +60,54 @@ func TestParseDuration(t *testing.T) {
 		}
 	}
 }
+
+func TestParseGCPolicy(t *testing.T) {
+	tests := []struct {
+		in   string
+		out  bigtable.GCPolicy
+		fail bool
+	}{
+		{in: "maxage=1h", out: bigtable.MaxAgePolicy(time.Hour * 1)},
+		{in: "maxversions=2", out: bigtable.MaxVersionsPolicy(int(2))},
+		{in: "maxversions=2 and maxage=1h", out: bigtable.IntersectionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)},
+		{in: "maxversions=2 or maxage=1h", out: bigtable.UnionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)},
+
+		{in: "maxage=1", fail: true},
+		{in: "maxage = 1h", fail: true},
+		{in: "maxage =1h", fail: true},
+		{in: "maxage= 1h", fail: true},
+		{in: "foomaxage=1h", fail: true},
+		{in: "maxversions=1h", fail: true},
+		{in: "maxversions= 1", fail: true},
+		{in: "maxversions = 1", fail: true},
+		{in: "maxversions =1", fail: true},
+		{in: "barmaxversions=1", fail: true},
+		{in: "maxage = 1h or maxversions=1h", fail: true},
+		{in: "foomaxversions=2 or maxage=1h", fail: true},
+		{in: "maxversions=2 or barmaxage=1h", fail: true},
+		{in: "foomaxversions=2 or barmaxage=1h", fail: true},
+		{in: "maxage = 1h and maxversions=1h", fail: true},
+		{in: "foomaxage=1h and maxversions=1", fail: true},
+		{in: "maxage=1h and barmaxversions=1", fail: true},
+		{in: "foomaxage=1h and barmaxversions=1", fail: true},
+	}
+	for _, tc := range tests {
+		got, err := parseGCPolicy(tc.in)
+		if !tc.fail && err != nil {
+			t.Errorf("parseGCPolicy(%q) unexpectedly failed: %v", tc.in, err)
+			continue
+		}
+		if tc.fail && err == nil {
+			t.Errorf("parseGCPolicy(%q) did not fail", tc.in)
+			continue
+		}
+		if tc.fail {
+			continue
+		}
+		var cmpOpts cmp.Options
+		cmpOpts = append(cmpOpts, cmp.AllowUnexported(bigtable.IntersectionPolicy([]bigtable.GCPolicy{}...)), cmp.AllowUnexported(bigtable.UnionPolicy([]bigtable.GCPolicy{}...)))
+		if !cmp.Equal(got, tc.out, cmpOpts) {
+			t.Errorf("parseGCPolicy(%q) =%v, want %v", tc.in, got, tc.out)
+		}
+	}
+}
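The new test compares GC policies, whose concrete types are unexported, via go-cmp's AllowUnexported option. A minimal self-contained sketch of that pattern (the pair type here is hypothetical, for illustration only):

	package main

	import (
		"fmt"

		"github.com/google/go-cmp/cmp"
	)

	type pair struct{ a, b int } // only unexported fields

	func main() {
		x := pair{1, 2}
		y := pair{1, 2}
		// Without AllowUnexported, cmp.Equal panics on unexported fields.
		fmt.Println(cmp.Equal(x, y, cmp.AllowUnexported(pair{}))) // true
	}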
vendor/cloud.google.com/go/bigtable/filter.go (generated, vendored; 49 changed lines)

@@ -51,7 +51,7 @@ func (cf chainFilter) proto() *btpb.RowFilter {
 		chain.Filters = append(chain.Filters, sf.proto())
 	}
 	return &btpb.RowFilter{
-		Filter: &btpb.RowFilter_Chain_{chain},
+		Filter: &btpb.RowFilter_Chain_{Chain: chain},
 	}
 }
 
@@ -77,7 +77,7 @@ func (ilf interleaveFilter) proto() *btpb.RowFilter {
 		inter.Filters = append(inter.Filters, sf.proto())
 	}
 	return &btpb.RowFilter{
-		Filter: &btpb.RowFilter_Interleave_{inter},
+		Filter: &btpb.RowFilter_Interleave_{Interleave: inter},
 	}
 }
 
@@ -91,7 +91,7 @@ type rowKeyFilter string
 func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) }
 
 func (rkf rowKeyFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{RowKeyRegexFilter: []byte(rkf)}}
 }
 
 // FamilyFilter returns a filter that matches cells whose family name
@@ -104,7 +104,7 @@ type familyFilter string
 func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) }
 
 func (ff familyFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: string(ff)}}
 }
 
 // ColumnFilter returns a filter that matches cells whose column name
@@ -117,7 +117,7 @@ type columnFilter string
 func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) }
 
 func (cf columnFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte(cf)}}
 }
 
 // ValueFilter returns a filter that matches cells whose value
@@ -130,7 +130,7 @@ type valueFilter string
 func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) }
 
 func (vf valueFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{ValueRegexFilter: []byte(vf)}}
 }
 
 // LatestNFilter returns a filter that matches the most recent N cells in each column.
@@ -141,7 +141,7 @@ type latestNFilter int32
 func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }
 
 func (lnf latestNFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{CellsPerColumnLimitFilter: int32(lnf)}}
 }
 
 // StripValueFilter returns a filter that replaces each value with the empty string.
@@ -151,7 +151,7 @@ type stripValueFilter struct{}
 
 func (stripValueFilter) String() string { return "strip_value()" }
 func (stripValueFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{StripValueTransformer: true}}
 }
 
 // TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero
@@ -186,11 +186,10 @@ func (trf timestampRangeFilter) String() string {
 
 func (trf timestampRangeFilter) proto() *btpb.RowFilter {
 	return &btpb.RowFilter{
-		Filter: &btpb.RowFilter_TimestampRangeFilter{
-			&btpb.TimestampRange{
-				int64(trf.startTime.TruncateToMilliseconds()),
-				int64(trf.endTime.TruncateToMilliseconds()),
-			},
+		Filter: &btpb.RowFilter_TimestampRangeFilter{TimestampRangeFilter: &btpb.TimestampRange{
+			StartTimestampMicros: int64(trf.startTime.TruncateToMilliseconds()),
+			EndTimestampMicros:   int64(trf.endTime.TruncateToMilliseconds()),
+		},
 	}}
 }
 
@@ -213,12 +212,12 @@ func (crf columnRangeFilter) String() string {
 func (crf columnRangeFilter) proto() *btpb.RowFilter {
 	r := &btpb.ColumnRange{FamilyName: crf.family}
 	if crf.start != "" {
-		r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)}
+		r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{StartQualifierClosed: []byte(crf.start)}
 	}
 	if crf.end != "" {
-		r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)}
+		r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{EndQualifierOpen: []byte(crf.end)}
 	}
-	return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnRangeFilter{ColumnRangeFilter: r}}
 }
 
 // ValueRangeFilter returns a filter that matches cells with values that fall within
@@ -239,12 +238,12 @@ func (vrf valueRangeFilter) String() string {
 func (vrf valueRangeFilter) proto() *btpb.RowFilter {
 	r := &btpb.ValueRange{}
 	if vrf.start != nil {
-		r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start}
+		r.StartValue = &btpb.ValueRange_StartValueClosed{StartValueClosed: vrf.start}
 	}
 	if vrf.end != nil {
-		r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end}
+		r.EndValue = &btpb.ValueRange_EndValueOpen{EndValueOpen: vrf.end}
 	}
-	return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRangeFilter{ValueRangeFilter: r}}
 }
 
 // ConditionFilter returns a filter that evaluates to one of two possible filters depending
@@ -278,10 +277,10 @@ func (cf conditionFilter) proto() *btpb.RowFilter {
 		ff = cf.falseFilter.proto()
 	}
 	return &btpb.RowFilter{
-		&btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{
-			cf.predicateFilter.proto(),
-			tf,
-			ff,
+		Filter: &btpb.RowFilter_Condition_{Condition: &btpb.RowFilter_Condition{
+			PredicateFilter: cf.predicateFilter.proto(),
+			TrueFilter:      tf,
+			FalseFilter:     ff,
 		}}}
 }
 
@@ -297,7 +296,7 @@ func (cof cellsPerRowOffsetFilter) String() string {
 }
 
 func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{int32(cof)}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{CellsPerRowOffsetFilter: int32(cof)}}
 }
 
 // CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row.
@@ -312,7 +311,7 @@ func (clf cellsPerRowLimitFilter) String() string {
 }
 
 func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{int32(clf)}}
+	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{CellsPerRowLimitFilter: int32(clf)}}
 }
 
 // TODO(dsymonds): More filters: sampling
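All of the filter.go edits above follow one pattern: composite literals for generated proto wrapper types gain explicit field names. Keyed literals are what `go vet`'s composites check expects for structs from other packages, and they keep compiling if the generated struct grows or reorders fields. A small sketch of the before/after, where oneofChain is a hypothetical stand-in for a wrapper like btpb.RowFilter_Chain_:

	package main

	import "fmt"

	// oneofChain mimics a generated oneof wrapper struct.
	type oneofChain struct {
		Chain string
	}

	func main() {
		positional := oneofChain{"f1"}   // old style; vet flags this for imported structs
		keyed := oneofChain{Chain: "f1"} // new style used throughout this commit
		fmt.Println(positional == keyed) // true: the rewrite is purely syntactic
	}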
vendor/cloud.google.com/go/bigtable/gc.go (generated, vendored; 8 changed lines)

@@ -52,7 +52,7 @@ func (ip intersectionPolicy) proto() *bttdpb.GcRule {
 		inter.Rules = append(inter.Rules, sp.proto())
 	}
 	return &bttdpb.GcRule{
-		Rule: &bttdpb.GcRule_Intersection_{inter},
+		Rule: &bttdpb.GcRule_Intersection_{Intersection: inter},
 	}
 }
 
@@ -77,7 +77,7 @@ func (up unionPolicy) proto() *bttdpb.GcRule {
 		union.Rules = append(union.Rules, sp.proto())
 	}
 	return &bttdpb.GcRule{
-		Rule: &bttdpb.GcRule_Union_{union},
+		Rule: &bttdpb.GcRule_Union_{Union: union},
 	}
 }
 
@@ -90,7 +90,7 @@ type maxVersionsPolicy int
 func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }
 
 func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
-	return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}}
+	return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{MaxNumVersions: int32(mvp)}}
 }
 
 // MaxAgePolicy returns a GC policy that applies to all cells
@@ -123,7 +123,7 @@ func (ma maxAgePolicy) proto() *bttdpb.GcRule {
 	// Fix this if people care about GC policies over 290 years.
 	ns := time.Duration(ma).Nanoseconds()
 	return &bttdpb.GcRule{
-		Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{
+		Rule: &bttdpb.GcRule_MaxAge{MaxAge: &durpb.Duration{
 			Seconds: ns / 1e9,
 			Nanos:   int32(ns % 1e9),
 		}},
vendor/cloud.google.com/go/bigtable/go18.go (generated, vendored; new file, 68 lines)

@@ -0,0 +1,68 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package bigtable
+
+import (
+	"fmt"
+
+	"go.opencensus.io/plugin/ocgrpc"
+	"go.opencensus.io/trace"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	"google.golang.org/grpc"
+)
+
+func openCensusOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})),
+	}
+}
+
+func traceStartSpan(ctx context.Context, name string) context.Context {
+	ctx, _ = trace.StartSpan(ctx, name)
+	return ctx
+}
+
+func traceEndSpan(ctx context.Context, err error) {
+	span := trace.FromContext(ctx)
+	if err != nil {
+		span.SetStatus(trace.Status{Message: err.Error()})
+	}
+
+	span.End()
+}
+
+func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
+	var attrs []trace.Attribute
+	for k, v := range attrMap {
+		var a trace.Attribute
+		switch v := v.(type) {
+		case string:
+			a = trace.StringAttribute(k, v)
+		case bool:
+			a = trace.BoolAttribute(k, v)
+		case int:
+			a = trace.Int64Attribute(k, int64(v))
+		case int64:
+			a = trace.Int64Attribute(k, v)
+		default:
+			a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
+		}
+		attrs = append(attrs, a)
+	}
+	trace.FromContext(ctx).Annotatef(attrs, format, args...)
+}
vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go (generated, vendored; 4 changed lines)

@@ -20,8 +20,8 @@ import (
 	"time"
 
 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )
 
 func TestRandomizedDelays(t *testing.T) {
@@ -43,7 +43,7 @@ func TestRandomizedDelays(t *testing.T) {
 		}
 		invokeTime = time.Now()
 		// Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90
-		errf := grpc.Errorf
+		errf := status.Errorf
 		return errf(codes.Unavailable, "")
 	}, settings...)
 }
vendor/cloud.google.com/go/bigtable/not_go18.go (generated, vendored; new file, 36 lines)

@@ -0,0 +1,36 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package bigtable
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+)
+
+// OpenCensus only supports go 1.8 and higher.
+
+func openCensusOptions() []option.ClientOption { return nil }
+
+func traceStartSpan(ctx context.Context, _ string) context.Context {
+	return ctx
+}
+
+func traceEndSpan(context.Context, error) {
+}
+
+func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) {
+}
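go18.go and not_go18.go form a build-constraint pair: on Go 1.8 and newer the OpenCensus-backed tracing helpers compile in, while older toolchains get the no-op versions above, so callers need no version checks. A minimal sketch of the same pattern, with hypothetical file and function names:

	// file feature_go18.go
	// +build go1.8

	package demo

	func traceEnabled() bool { return true }

	// file feature_not_go18.go
	// +build !go1.8

	package demo

	func traceEnabled() bool { return false }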
vendor/cloud.google.com/go/bigtable/retry_test.go (generated, vendored; 21 changed lines)

@@ -30,6 +30,7 @@ import (
 	rpcpb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )
 
 func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
@@ -42,12 +43,12 @@ func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
 		return nil, nil, err
 	}
 
-	client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
+	client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock()))
 	if err != nil {
 		return nil, nil, err
 	}
 
-	adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
+	adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock()))
 	if err != nil {
 		return nil, nil, err
 	}
@@ -76,7 +77,7 @@ func TestRetryApply(t *testing.T) {
 	errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
 		if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 {
 			errCount++
-			return nil, grpc.Errorf(code, "")
+			return nil, status.Errorf(code, "")
 		}
 		return handler(ctx, req)
 	}
@@ -156,7 +157,7 @@ func TestRetryApplyBulk(t *testing.T) {
 	f = func(ss grpc.ServerStream) error {
 		if errCount < 3 {
 			errCount++
-			return grpc.Errorf(codes.Aborted, "")
+			return status.Errorf(codes.Aborted, "")
 		}
 		return nil
 	}
@@ -182,7 +183,7 @@ func TestRetryApplyBulk(t *testing.T) {
 		switch errCount {
 		case 0:
 			// Retryable request failure
-			err = grpc.Errorf(codes.Unavailable, "")
+			err = status.Errorf(codes.Unavailable, "")
 		case 1:
 			// Two mutations fail
 			writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)
@@ -235,8 +236,8 @@ func TestRetryApplyBulk(t *testing.T) {
 		t.Errorf("unretryable errors: request failed %v", err)
 	}
 	want := []error{
-		grpc.Errorf(codes.FailedPrecondition, ""),
-		grpc.Errorf(codes.Aborted, ""),
+		status.Errorf(codes.FailedPrecondition, ""),
+		status.Errorf(codes.Aborted, ""),
 	}
 	if !testutil.Equal(want, errors) {
 		t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
@@ -323,20 +324,20 @@ func TestRetryReadRows(t *testing.T) {
 		switch errCount {
 		case 0:
 			// Retryable request failure
-			err = grpc.Errorf(codes.Unavailable, "")
+			err = status.Errorf(codes.Unavailable, "")
 		case 1:
 			// Write two rows then error
 			if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
 				t.Errorf("first retry, no data received yet: got %q, want %q", got, want)
 			}
 			writeReadRowsResponse(ss, "a", "b")
-			err = grpc.Errorf(codes.Unavailable, "")
+			err = status.Errorf(codes.Unavailable, "")
 		case 2:
 			// Retryable request failure
 			if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
 				t.Errorf("2 range retries: got %q, want %q", got, want)
 			}
-			err = grpc.Errorf(codes.Unavailable, "")
+			err = status.Errorf(codes.Unavailable, "")
 		case 3:
 			// Write two more rows
 			writeReadRowsResponse(ss, "c", "d")
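The retry tests above migrate from the deprecated grpc.Errorf to status.Errorf; both build an error backed by a *status.Status, so the gRPC code can be recovered with status.Code. A minimal sketch of the round trip:

	package main

	import (
		"fmt"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	func main() {
		err := status.Errorf(codes.Unavailable, "try again")
		// status.Code extracts the code (codes.OK for a nil error).
		fmt.Println(status.Code(err) == codes.Unavailable) // true
	}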
vendor/cloud.google.com/go/cloud.go (generated, vendored; 30 changed lines)

@@ -12,9 +12,29 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package cloud is the root of the packages used to access Google Cloud
-// Services. See https://godoc.org/cloud.google.com/go for a full list
-// of sub-packages.
-//
-// This package documents how to authorize and authenticate the sub packages.
+/*
+Package cloud is the root of the packages used to access Google Cloud
+Services. See https://godoc.org/cloud.google.com/go for a full list
+of sub-packages.
+
+Examples in this package show ways to authorize and authenticate the
+sub packages.
+
+Connection Pooling
+
+Connection pooling differs in clients based on their transport. Cloud
+clients either rely on HTTP or gRPC transports to communicate
+with Google Cloud.
+
+Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the
+underlying HTTP transport to cache connections for later re-use. These are cached to
+the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in
+http.DefaultTransport.
+
+For gPRC clients (all others in this repo), connection pooling is configurable. Users
+of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client
+option to NewClient calls. This configures the underlying gRPC connections to be
+pooled and addressed in a round robin fashion.
+
+*/
 package cloud // import "cloud.google.com/go"
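A sketch of the gRPC pooling option described in the new package doc, using pubsub as one of the gRPC-based clients; the project ID and pool size are placeholder values:

	package main

	import (
		"golang.org/x/net/context"

		"cloud.google.com/go/pubsub"
		"google.golang.org/api/option"
	)

	func main() {
		ctx := context.Background()
		// Four underlying connections, addressed round robin per RPC.
		client, err := pubsub.NewClient(ctx, "project-id",
			option.WithGRPCConnectionPool(4))
		if err != nil {
			// TODO: Handle error.
		}
		_ = client
	}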
@@ -43,15 +43,15 @@ func TestValueCollector(t *testing.T) {
 	c := NewCollector(&Program{}, 26)
 	// Add some variables of various types, whose values we want the collector to read.
 	variablesToAdd := []debug.LocalVar{
-		{Name: "a", Var: debug.Var{int16Type, 0x1}},
-		{Name: "b", Var: debug.Var{stringType, 0x2}},
-		{Name: "c", Var: debug.Var{structType, 0x3}},
-		{Name: "d", Var: debug.Var{pointerType, 0x4}},
-		{Name: "e", Var: debug.Var{arrayType, 0x5}},
-		{Name: "f", Var: debug.Var{debugStringType, 0x6}},
-		{Name: "g", Var: debug.Var{mapType, 0x7}},
-		{Name: "h", Var: debug.Var{channelType, 0x8}},
-		{Name: "i", Var: debug.Var{sliceType, 0x9}},
+		{Name: "a", Var: debug.Var{TypeID: int16Type, Address: 0x1}},
+		{Name: "b", Var: debug.Var{TypeID: stringType, Address: 0x2}},
+		{Name: "c", Var: debug.Var{TypeID: structType, Address: 0x3}},
+		{Name: "d", Var: debug.Var{TypeID: pointerType, Address: 0x4}},
+		{Name: "e", Var: debug.Var{TypeID: arrayType, Address: 0x5}},
+		{Name: "f", Var: debug.Var{TypeID: debugStringType, Address: 0x6}},
+		{Name: "g", Var: debug.Var{TypeID: mapType, Address: 0x7}},
+		{Name: "h", Var: debug.Var{TypeID: channelType, Address: 0x8}},
+		{Name: "i", Var: debug.Var{TypeID: sliceType, Address: 0x9}},
 	}
 	expectedResults := []*cd.Variable{
 		&cd.Variable{Name: "a", VarTableIndex: 1},
@@ -195,17 +195,17 @@ func (p *Program) Value(v debug.Var) (debug.Value, error) {
 			Fields: []debug.StructField{
 				{
 					Name: "x",
-					Var:  debug.Var{int16Type, 0x1},
+					Var:  debug.Var{TypeID: int16Type, Address: 0x1},
 				},
 				{
 					Name: "y",
-					Var:  debug.Var{stringType, 0x2},
+					Var:  debug.Var{TypeID: stringType, Address: 0x2},
 				},
 			},
 		}, nil
 	case pointerType:
 		// A pointer to the first variable above.
-		return debug.Pointer{int16Type, 0x1}, nil
+		return debug.Pointer{TypeID: int16Type, Address: 0x1}, nil
 	case arrayType:
 		// An array of 4 32-bit-wide elements.
 		return debug.Array{
vendor/cloud.google.com/go/datastore/datastore.go (generated, vendored; 63 changed lines)

@@ -21,6 +21,7 @@ import (
 	"os"
 	"reflect"
 
+	"cloud.google.com/go/internal/trace"
 	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	gtransport "google.golang.org/api/transport/grpc"
@@ -302,11 +303,14 @@ func (c *Client) Close() error {
 // type than the one it was stored from, or when a field is missing or
 // unexported in the destination struct. ErrFieldMismatch is only returned if
 // dst is a struct pointer.
-func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
+func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Get")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
 		return ErrInvalidEntityType
 	}
-	err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
+	err = c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
 	if me, ok := err.(MultiError); ok {
 		return me[0]
 	}
@@ -323,7 +327,10 @@ func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
 // As a special case, PropertyList is an invalid type for dst, even though a
 // PropertyList is a slice of structs. It is treated as invalid to avoid being
 // mistakenly passed when []PropertyList was intended.
-func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
+func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.GetMulti")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	return c.get(ctx, keys, dst, nil)
 }
 
@@ -452,7 +459,11 @@ func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
 // PutMulti is a batch version of Put.
 //
 // src must satisfy the same conditions as the dst argument to GetMulti.
-func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {
+// TODO(jba): rewrite in terms of Mutate.
+func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) (_ []*Key, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.PutMulti")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	mutations, err := putMutations(keys, src)
 	if err != nil {
 		return nil, err
@@ -540,7 +551,11 @@ func (c *Client) Delete(ctx context.Context, key *Key) error {
 }
 
 // DeleteMulti is a batch version of Delete.
-func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
+// TODO(jba): rewrite in terms of Mutate.
+func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.DeleteMulti")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	mutations, err := deleteMutations(keys)
 	if err != nil {
 		return err
@@ -572,3 +587,41 @@ func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
 	}
 	return mutations, nil
 }
+
+// Mutate applies one or more mutations atomically.
+// It returns the keys of the argument Mutations, in the same order.
+//
+// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
+// Mutate returns a MultiError in this case even if there is only one Mutation.
+func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) (_ []*Key, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Mutate")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	pmuts, err := mutationProtos(muts)
+	if err != nil {
+		return nil, err
+	}
+	req := &pb.CommitRequest{
+		ProjectId: c.dataset,
+		Mutations: pmuts,
+		Mode:      pb.CommitRequest_NON_TRANSACTIONAL,
+	}
+	resp, err := c.client.Commit(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	// Copy any newly minted keys into the returned keys.
+	ret := make([]*Key, len(muts))
+	for i, mut := range muts {
+		if mut.key.Incomplete() {
+			// This key is in the mutation results.
+			ret[i], err = protoToKey(resp.MutationResults[i].Key)
+			if err != nil {
+				return nil, errors.New("datastore: internal error: server returned an invalid key")
+			}
+		} else {
+			ret[i] = mut.key
+		}
+	}
+	return ret, nil
+}
vendor/cloud.google.com/go/datastore/datastore.replay (generated, vendored; binary file not shown)
vendor/cloud.google.com/go/datastore/datastore_test.go (generated, vendored; 64 changed lines)

@@ -263,6 +263,43 @@ type Y2 struct {
 	F []int64
 }
 
+type Pointers struct {
+	Pi *int
+	Ps *string
+	Pb *bool
+	Pf *float64
+	Pg *GeoPoint
+	Pt *time.Time
+}
+
+type PointersOmitEmpty struct {
+	Pi *int       `datastore:",omitempty"`
+	Ps *string    `datastore:",omitempty"`
+	Pb *bool      `datastore:",omitempty"`
+	Pf *float64   `datastore:",omitempty"`
+	Pg *GeoPoint  `datastore:",omitempty"`
+	Pt *time.Time `datastore:",omitempty"`
+}
+
+func populatedPointers() *Pointers {
+	var (
+		i int
+		s string
+		b bool
+		f float64
+		g GeoPoint
+		t time.Time
+	)
+	return &Pointers{
+		Pi: &i,
+		Ps: &s,
+		Pb: &b,
+		Pf: &f,
+		Pg: &g,
+		Pt: &t,
+	}
+}
+
 type Tagged struct {
 	A int   `datastore:"a,noindex"`
 	B []int `datastore:"b"`
@@ -406,10 +443,6 @@ type PtrToStructField struct {
 
 var two int = 2
 
-type PtrToInt struct {
-	I *int
-}
-
 type EmbeddedTime struct {
 	time.Time
 }
@@ -1645,15 +1678,6 @@ var testCases = []testCase{
 		"",
 		"",
 	},
-	{
-		"save struct with pointer to int field",
-		&PtrToInt{
-			I: &two,
-		},
-		&PtrToInt{},
-		"unsupported struct field",
-		"",
-	},
 	{
 		"struct with nil ptr to struct fields",
 		&PtrToStructField{
@@ -1903,6 +1927,20 @@ var testCases = []testCase{
 		"",
 		"",
 	},
+	{
+		"pointer fields: nil",
+		&Pointers{},
+		&Pointers{},
+		"",
+		"",
+	},
+	{
+		"pointer fields: populated with zeroes",
+		populatedPointers(),
+		populatedPointers(),
+		"",
+		"",
+	},
 }
 
 // checkErr returns the empty string if either both want and err are zero,
vendor/cloud.google.com/go/datastore/doc.go (generated, vendored; 49 changed lines)

@@ -15,8 +15,6 @@
 /*
 Package datastore provides a client for Google Cloud Datastore.
 
-Note: This package is in beta. Some backwards-incompatible changes may occur.
-
 
 Basic Operations
 
@@ -43,7 +41,8 @@ Valid value types are:
   - time.Time (stored with microsecond precision),
   - structs whose fields are all valid value types,
   - pointers to structs whose fields are all valid value types,
-  - slices of any of the above.
+  - slices of any of the above,
+  - pointers to a signed integer, bool, string, float32, or float64.
 
 Slices of structs are valid, as are structs that contain slices.
 
@@ -86,6 +85,10 @@ GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
 Delete functions. They take a []*Key instead of a *Key, and may return a
 datastore.MultiError when encountering partial failure.
 
+Mutate generalizes PutMulti and DeleteMulti to a sequence of any Datastore mutations.
+It takes a series of mutations created with NewInsert, NewUpdate, NewUpsert and
+NewDelete and applies them atomically.
+
 
 Properties
 
@@ -118,9 +121,10 @@ field name. A "-" tag name means that the datastore will ignore that field.
 
 The only valid options are "omitempty", "noindex" and "flatten".
 
-If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save.
-The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero.
-Struct field values will never be empty.
+If the options include "omitempty" and the value of the field is empty, then the
+field will be omitted on Save. The empty values are false, 0, any nil pointer or
+interface value, and any array, slice, map, or string of length zero. Struct field
+values will never be empty, except for nil pointers.
 
 If options include "noindex" then the field will not be indexed. All fields are indexed
 by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
@@ -154,6 +158,36 @@ Example code:
 	}
 
 
+Slice Fields
+
+A field of slice type corresponds to a Datastore array property, except for []byte, which corresponds
+to a Datastore blob.
+
+Zero-length slice fields are not saved. Slice fields of length 1 or greater are saved
+as Datastore arrays. When a zero-length Datastore array is loaded into a slice field,
+the slice field remains unchanged.
+
+If a non-array value is loaded into a slice field, the result will be a slice with
+one element, containing the value.
+
+Loading Nulls
+
+Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value.
+Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value.
+Loading a Null into a pointer field results in nil.
+Loading a Null into a field of struct type is an error.
+
+Pointer Fields
+
+A struct field can be a pointer to a signed integer, floating-point number, string or
+bool. Putting a non-nil pointer will store its dereferenced value. Putting a nil
+pointer will store a Datastore Null property, unless the field is marked omitempty,
+in which case no property will be stored.
+
+Loading a Null into a pointer field sets the pointer to nil. Loading any other value
+allocates new storage with the value, and sets the field to point to it.
+
+
 Key Field
 
 If the struct contains a *datastore.Key field tagged with the name "__key__",
@@ -436,6 +470,9 @@ Example code:
 		fmt.Printf("Count=%d\n", count)
 	}
 
+Pass the ReadOnly option to RunInTransaction if your transaction is used only for Get,
+GetMulti or queries. Read-only transactions are more efficient.
+
 Google Cloud Datastore Emulator
 
 This package supports the Cloud Datastore emulator, which is useful for testing and
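A sketch of the pointer-field semantics documented above; the Article type is illustrative, not part of the package:

	type Article struct {
		Title string
		// A nil Rating is stored as a Datastore Null property.
		Rating *int
		// With omitempty, a nil Summary stores no property at all.
		Summary *string `datastore:",omitempty"`
	}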
vendor/cloud.google.com/go/datastore/example_test.go (generated, vendored; 22 changed lines)

@@ -396,6 +396,28 @@ func ExampleClient_GetAll() {
 	}
 }
 
+func ExampleClient_Mutate() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	key1 := datastore.NameKey("Post", "post1", nil)
+	key2 := datastore.NameKey("Post", "post2", nil)
+	key3 := datastore.NameKey("Post", "post3", nil)
+	key4 := datastore.NameKey("Post", "post4", nil)
+
+	_, err = client.Mutate(ctx,
+		datastore.NewInsert(key1, Post{Title: "Post 1"}),
+		datastore.NewUpsert(key2, Post{Title: "Post 2"}),
+		datastore.NewUpdate(key3, Post{Title: "Post 3"}),
+		datastore.NewDelete(key4))
+	if err != nil {
+		// TODO: Handle error.
+	}
+}
+
 func ExampleCommit_Key() {
 	ctx := context.Background()
 	client, err := datastore.NewClient(ctx, "")
160
vendor/cloud.google.com/go/datastore/integration_test.go
generated
vendored
160
vendor/cloud.google.com/go/datastore/integration_test.go
generated
vendored
|
@ -35,6 +35,8 @@ import (
|
||||||
"google.golang.org/api/iterator"
|
"google.golang.org/api/iterator"
|
||||||
"google.golang.org/api/option"
|
"google.golang.org/api/option"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO(djd): Make test entity clean up more robust: some test entities may
|
// TODO(djd): Make test entity clean up more robust: some test entities may
|
||||||
|
@ -1051,6 +1053,51 @@ func TestTransaction(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestReadOnlyTransaction(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Integration tests skipped in short mode")
|
||||||
|
}
|
||||||
|
ctx := context.Background()
|
||||||
|
client := newClient(ctx, t, nil)
|
||||||
|
defer client.Close()
|
||||||
|
|
||||||
|
+	type value struct{ N int }
+
+	// Put a value.
+	const n = 5
+	v := &value{N: n}
+	key, err := client.Put(ctx, IncompleteKey("roTxn", nil), v)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.Delete(ctx, key)
+
+	// Read it from a read-only transaction.
+	_, err = client.RunInTransaction(ctx, func(tx *Transaction) error {
+		if err := tx.Get(key, v); err != nil {
+			return err
+		}
+		return nil
+	}, ReadOnly)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if v.N != n {
+		t.Fatalf("got %d, want %d", v.N, n)
+	}
+
+	// Attempting to write from a read-only transaction is an error.
+	_, err = client.RunInTransaction(ctx, func(tx *Transaction) error {
+		if _, err := tx.Put(key, v); err != nil {
+			return err
+		}
+		return nil
+	}, ReadOnly)
+	if err == nil {
+		t.Fatal("got nil, want error")
+	}
+}
+
 func TestNilPointers(t *testing.T) {
 	ctx := context.Background()
 	client := newTestClient(ctx, t)
@@ -1115,3 +1162,116 @@ func TestNestedRepeatedElementNoIndex(t *testing.T)
 		t.Fatalf("client.Delete: %v", err)
 	}
 }
+
+func TestPointerFields(t *testing.T) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	want := populatedPointers()
+	key, err := client.Put(ctx, IncompleteKey("pointers", nil), want)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var got Pointers
+	if err := client.Get(ctx, key, &got); err != nil {
+		t.Fatal(err)
+	}
+	if got.Pi == nil || *got.Pi != *want.Pi {
+		t.Errorf("Pi: got %v, want %v", got.Pi, *want.Pi)
+	}
+	if got.Ps == nil || *got.Ps != *want.Ps {
+		t.Errorf("Ps: got %v, want %v", got.Ps, *want.Ps)
+	}
+	if got.Pb == nil || *got.Pb != *want.Pb {
+		t.Errorf("Pb: got %v, want %v", got.Pb, *want.Pb)
+	}
+	if got.Pf == nil || *got.Pf != *want.Pf {
+		t.Errorf("Pf: got %v, want %v", got.Pf, *want.Pf)
+	}
+	if got.Pg == nil || *got.Pg != *want.Pg {
+		t.Errorf("Pg: got %v, want %v", got.Pg, *want.Pg)
+	}
+	if got.Pt == nil || !got.Pt.Equal(*want.Pt) {
+		t.Errorf("Pt: got %v, want %v", got.Pt, *want.Pt)
+	}
+}
+
+func TestMutate(t *testing.T) {
+	// test Client.Mutate
+	testMutate(t, func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error) {
+		return client.Mutate(ctx, muts...)
+	})
+	// test Transaction.Mutate
+	testMutate(t, func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error) {
+		var pkeys []*PendingKey
+		commit, err := client.RunInTransaction(ctx, func(tx *Transaction) error {
+			var err error
+			pkeys, err = tx.Mutate(muts...)
+			return err
+		})
+		if err != nil {
+			return nil, err
+		}
+		var keys []*Key
+		for _, pk := range pkeys {
+			keys = append(keys, commit.Key(pk))
+		}
+		return keys, nil
+	})
+}
+
+func testMutate(t *testing.T, mutate func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error)) {
+	ctx := context.Background()
+	client := newTestClient(ctx, t)
+	defer client.Close()
+
+	type T struct{ I int }
+
+	check := func(k *Key, want interface{}) {
+		var x T
+		err := client.Get(ctx, k, &x)
+		switch want := want.(type) {
+		case error:
+			if err != want {
+				t.Errorf("key %s: got error %v, want %v", k, err, want)
+			}
+		case int:
+			if err != nil {
+				t.Fatalf("key %s: %v", k, err)
+			}
+			if x.I != want {
+				t.Errorf("key %s: got %d, want %d", k, x.I, want)
+			}
+		default:
+			panic("check: bad arg")
+		}
+	}
+
+	keys, err := mutate(ctx, client,
+		NewInsert(IncompleteKey("t", nil), &T{1}),
+		NewUpsert(IncompleteKey("t", nil), &T{2}),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	check(keys[0], 1)
+	check(keys[1], 2)
+
+	_, err = mutate(ctx, client,
+		NewUpdate(keys[0], &T{3}),
+		NewDelete(keys[1]),
+	)
+	check(keys[0], 3)
+	check(keys[1], ErrNoSuchEntity)
+
+	_, err = mutate(ctx, client, NewInsert(keys[0], &T{4}))
+	if got, want := status.Code(err), codes.AlreadyExists; got != want {
+		t.Errorf("Insert existing key: got %s, want %s", got, want)
+	}
+
+	_, err = mutate(ctx, client, NewUpdate(keys[1], &T{4}))
+	if got, want := status.Code(err), codes.NotFound; got != want {
+		t.Errorf("Update non-existing key: got %s, want %s", got, want)
+	}
+}
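The tests above drive the new batch-mutation API through both Client.Mutate and Transaction.Mutate. For orientation, here is a minimal, self-contained sketch of how application code might call it, assuming the datastore package at the revision vendored here; the project ID and the Task kind are invented for illustration.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/datastore"
)

// Task is a made-up entity kind used only for this sketch.
type Task struct {
	Done bool
}

func main() {
	ctx := context.Background()
	// "my-project" is a placeholder; any reachable project ID works.
	client, err := datastore.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Apply several changes in one RPC; see mutation.go below for how the
	// constructors validate keys and build the underlying protos.
	keys, err := client.Mutate(ctx,
		datastore.NewInsert(datastore.IncompleteKey("Task", nil), &Task{}),
		datastore.NewUpsert(datastore.IDKey("Task", 1, nil), &Task{Done: true}),
		datastore.NewDelete(datastore.IDKey("Task", 2, nil)),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(keys)
}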
vendor/cloud.google.com/go/datastore/load.go | 43 (generated, vendored)

@@ -60,6 +60,10 @@ func typeMismatchReason(p Property, v reflect.Value) string
 	return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
 }
 
+func overflowReason(x interface{}, v reflect.Value) string {
+	return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+}
+
 type propertyLoader struct {
 	// m holds the number of times a substruct field like "Foo.Bar.Baz" has
 	// been seen so far. The map is constructed lazily.
@@ -243,7 +247,7 @@ func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err
 }
 
 // setVal sets 'v' to the value of the Property 'p'.
-func setVal(v reflect.Value, p Property) string {
+func setVal(v reflect.Value, p Property) (s string) {
 	pValue := p.Value
 	switch v.Kind() {
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -252,7 +256,7 @@ func setVal(v reflect.Value, p Property) string {
 			return typeMismatchReason(p, v)
 		}
 		if v.OverflowInt(x) {
-			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+			return overflowReason(x, v)
 		}
 		v.SetInt(x)
 	case reflect.Bool:
@@ -273,12 +277,12 @@ func setVal(v reflect.Value, p Property) string {
 			return typeMismatchReason(p, v)
 		}
 		if v.OverflowFloat(x) {
-			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+			return overflowReason(x, v)
 		}
 		v.SetFloat(x)
 	case reflect.Ptr:
-		// v must be either a pointer to a Key or Entity.
-		if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct {
+		// v must be a pointer to either a Key, an Entity, or one of the supported basic types.
+		if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct && !isValidPointerType(v.Type().Elem()) {
 			return typeMismatchReason(p, v)
 		}
 
@@ -290,21 +294,38 @@ func setVal(v reflect.Value, p Property) string {
 			return ""
 		}
 
-		switch x := pValue.(type) {
-		case *Key:
+		if x, ok := p.Value.(*Key); ok {
 			if _, ok := v.Interface().(*Key); !ok {
 				return typeMismatchReason(p, v)
 			}
 			v.Set(reflect.ValueOf(x))
+			return ""
+		}
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		switch x := pValue.(type) {
 		case *Entity:
-			if v.IsNil() {
-				v.Set(reflect.New(v.Type().Elem()))
-			}
 			err := loadEntity(v.Interface(), x)
 			if err != nil {
 				return err.Error()
 			}
+		case int64:
+			if v.Elem().OverflowInt(x) {
+				return overflowReason(x, v.Elem())
+			}
+			v.Elem().SetInt(x)
+		case float64:
+			if v.Elem().OverflowFloat(x) {
+				return overflowReason(x, v.Elem())
+			}
+			v.Elem().SetFloat(x)
+		case bool:
+			v.Elem().SetBool(x)
+		case string:
+			v.Elem().SetString(x)
+		case GeoPoint, time.Time:
+			v.Elem().Set(reflect.ValueOf(x))
 		default:
 			return typeMismatchReason(p, v)
 		}
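The setVal changes above are what make pointer fields round-trip: a Datastore Null loads as a nil pointer, and a concrete value allocates the pointee before setting it. A small sketch against the package's exported LoadStruct helper (the Row type and property names are invented for illustration):

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/datastore"
)

// Row is a made-up struct with pointer fields, mirroring the Pointers
// test type used in the diffs here.
type Row struct {
	Pi *int64
	Ps *string
}

func main() {
	var row Row
	// A nil-valued property loads as a nil pointer; a concrete value
	// allocates the pointee and sets it (per the new setVal cases above).
	err := datastore.LoadStruct(&row, []datastore.Property{
		{Name: "Pi", Value: nil},
		{Name: "Ps", Value: "hello"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(row.Pi == nil, *row.Ps) // true hello
}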
vendor/cloud.google.com/go/datastore/load_test.go | 129 (generated, vendored)

@@ -17,6 +17,7 @@ package datastore
 import (
 	"reflect"
 	"testing"
+	"time"
 
 	"cloud.google.com/go/internal/testutil"
 
@@ -755,3 +756,131 @@ func TestKeyLoader(t *testing.T) {
 		}
 	}
 }
+
+func TestLoadPointers(t *testing.T) {
+	for _, test := range []struct {
+		desc string
+		in   []Property
+		want Pointers
+	}{
+		{
+			desc: "nil properties load as nil pointers",
+			in: []Property{
+				Property{Name: "Pi", Value: nil},
+				Property{Name: "Ps", Value: nil},
+				Property{Name: "Pb", Value: nil},
+				Property{Name: "Pf", Value: nil},
+				Property{Name: "Pg", Value: nil},
+				Property{Name: "Pt", Value: nil},
+			},
+			want: Pointers{},
+		},
+		{
+			desc: "missing properties load as nil pointers",
+			in:   []Property(nil),
+			want: Pointers{},
+		},
+		{
+			desc: "non-nil properties load as the appropriate values",
+			in: []Property{
+				Property{Name: "Pi", Value: int64(1)},
+				Property{Name: "Ps", Value: "x"},
+				Property{Name: "Pb", Value: true},
+				Property{Name: "Pf", Value: 3.14},
+				Property{Name: "Pg", Value: GeoPoint{Lat: 1, Lng: 2}},
+				Property{Name: "Pt", Value: time.Unix(100, 0)},
+			},
+			want: func() Pointers {
+				p := populatedPointers()
+				*p.Pi = 1
+				*p.Ps = "x"
+				*p.Pb = true
+				*p.Pf = 3.14
+				*p.Pg = GeoPoint{Lat: 1, Lng: 2}
+				*p.Pt = time.Unix(100, 0)
+				return *p
+			}(),
+		},
+	} {
+		var got Pointers
+		if err := LoadStruct(&got, test.in); err != nil {
+			t.Fatalf("%s: %v", test.desc, err)
+		}
+		if !testutil.Equal(got, test.want) {
+			t.Errorf("%s:\ngot %+v\nwant %+v", test.desc, got, test.want)
+		}
+	}
+}
+
+func TestLoadNonArrayIntoSlice(t *testing.T) {
+	// Loading a non-array value into a slice field results in a slice of size 1.
+	var got struct{ S []string }
+	if err := LoadStruct(&got, []Property{{Name: "S", Value: "x"}}); err != nil {
+		t.Fatal(err)
+	}
+	if want := []string{"x"}; !testutil.Equal(got.S, want) {
+		t.Errorf("got %#v, want %#v", got.S, want)
+	}
+}
+
+func TestLoadEmptyArrayIntoSlice(t *testing.T) {
+	// Loading an empty array into a slice field is a no-op.
+	var got = struct{ S []string }{[]string{"x"}}
+	if err := LoadStruct(&got, []Property{{Name: "S", Value: []interface{}{}}}); err != nil {
+		t.Fatal(err)
+	}
+	if want := []string{"x"}; !testutil.Equal(got.S, want) {
+		t.Errorf("got %#v, want %#v", got.S, want)
+	}
+}
+
+func TestLoadNull(t *testing.T) {
+	// Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value.
+	// Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value.
+	// (As expected from the behavior of slices and nulls with basic types.)
+	type S struct {
+		I int64
+		F float64
+		S string
+		B bool
+		A []string
+	}
+	got := S{
+		I: 1,
+		F: 1.0,
+		S: "1",
+		B: true,
+		A: []string{"X"},
+	}
+	want := S{A: []string{""}}
+	props := []Property{{Name: "I"}, {Name: "F"}, {Name: "S"}, {Name: "B"}, {Name: "A"}}
+	if err := LoadStruct(&got, props); err != nil {
+		t.Fatal(err)
+	}
+	if !testutil.Equal(got, want) {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+
+	// Loading a Null into a pointer to struct field results in a nil field.
+	got2 := struct{ X *S }{X: &S{}}
+	if err := LoadStruct(&got2, []Property{{Name: "X"}}); err != nil {
+		t.Fatal(err)
+	}
+	if got2.X != nil {
+		t.Errorf("got %v, want nil", got2.X)
+	}
+
+	// Loading a Null into a struct field is an error.
+	got3 := struct{ X S }{}
+	err := LoadStruct(&got3, []Property{{Name: "X"}})
+	if err == nil {
+		t.Error("got nil, want error")
+	}
+}
+
+// var got2 struct{ S []Pet }
+// if err := LoadStruct(&got2, []Property{{Name: "S", Value: nil}}); err != nil {
+// 	t.Fatal(err)
+// }
+
+// }
vendor/cloud.google.com/go/datastore/mutation.go | 129 (generated, vendored, new file)

// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"fmt"

	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

// A Mutation represents a change to a Datastore entity.
type Mutation struct {
	key *Key // needed for transaction PendingKeys and to dedup deletions
	mut *pb.Mutation
	err error
}

func (m *Mutation) isDelete() bool {
	_, ok := m.mut.Operation.(*pb.Mutation_Delete)
	return ok
}

// NewInsert creates a mutation that will save the entity src into the datastore with
// key k, returning an error if k already exists.
// See Client.Put for valid values of src.
func NewInsert(k *Key, src interface{}) *Mutation {
	if !k.valid() {
		return &Mutation{err: ErrInvalidKey}
	}
	p, err := saveEntity(k, src)
	if err != nil {
		return &Mutation{err: err}
	}
	return &Mutation{
		key: k,
		mut: &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}},
	}
}

// NewUpsert creates a mutation that saves the entity src into the datastore with key
// k, whether or not k exists. See Client.Put for valid values of src.
func NewUpsert(k *Key, src interface{}) *Mutation {
	if !k.valid() {
		return &Mutation{err: ErrInvalidKey}
	}
	p, err := saveEntity(k, src)
	if err != nil {
		return &Mutation{err: err}
	}
	return &Mutation{
		key: k,
		mut: &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}},
	}
}

// NewUpdate creates a mutation that replaces the entity in the datastore with key k,
// returning an error if k does not exist. See Client.Put for valid values of src.
func NewUpdate(k *Key, src interface{}) *Mutation {
	if !k.valid() {
		return &Mutation{err: ErrInvalidKey}
	}
	if k.Incomplete() {
		return &Mutation{err: fmt.Errorf("datastore: can't update the incomplete key: %v", k)}
	}
	p, err := saveEntity(k, src)
	if err != nil {
		return &Mutation{err: err}
	}
	return &Mutation{
		key: k,
		mut: &pb.Mutation{Operation: &pb.Mutation_Update{Update: p}},
	}
}

// NewDelete creates a mutation that deletes the entity with key k.
func NewDelete(k *Key) *Mutation {
	if !k.valid() {
		return &Mutation{err: ErrInvalidKey}
	}
	if k.Incomplete() {
		return &Mutation{err: fmt.Errorf("datastore: can't delete the incomplete key: %v", k)}
	}
	return &Mutation{
		key: k,
		mut: &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}},
	}
}

func mutationProtos(muts []*Mutation) ([]*pb.Mutation, error) {
	// If any of the mutations have errors, collect and return them.
	var merr MultiError
	for i, m := range muts {
		if m.err != nil {
			if merr == nil {
				merr = make(MultiError, len(muts))
			}
			merr[i] = m.err
		}
	}
	if merr != nil {
		return nil, merr
	}
	var protos []*pb.Mutation
	// Collect protos. Remove duplicate deletions (see deleteMutations).
	seen := map[string]bool{}
	for _, m := range muts {
		if m.isDelete() {
			ks := m.key.String()
			if seen[ks] {
				continue
			}
			seen[ks] = true
		}
		protos = append(protos, m.mut)
	}
	return protos, nil
}
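Note that the constructors above never fail loudly: an invalid key or entity is stored inside the Mutation and only reported when mutationProtos runs, as a MultiError indexed like the input slice. Assuming Client.Mutate surfaces that error unchanged (its definition falls outside this hunk), caller-side inspection could look roughly like this; the project ID is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/datastore"
)

func main() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// NewDelete(nil) stores ErrInvalidKey inside the Mutation; the error is
	// only reported when the batch is converted to protos at Mutate time.
	_, err = client.Mutate(ctx, datastore.NewDelete(nil))
	if merr, ok := err.(datastore.MultiError); ok {
		for i, e := range merr {
			if e != nil {
				fmt.Printf("mutation %d: %v\n", i, e)
			}
		}
	}
}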
vendor/cloud.google.com/go/datastore/mutation_test.go | 150 (generated, vendored, new file)

// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"testing"

	"cloud.google.com/go/internal/testutil"
	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

func TestMutationProtos(t *testing.T) {
	var keys []*Key
	for i := 1; i <= 4; i++ {
		k := IDKey("kind", int64(i), nil)
		keys = append(keys, k)
	}
	entity := &PropertyList{{Name: "n", Value: "v"}}
	entityForKey := func(k *Key) *pb.Entity {
		return &pb.Entity{
			Key: keyToProto(k),
			Properties: map[string]*pb.Value{
				"n": &pb.Value{ValueType: &pb.Value_StringValue{StringValue: "v"}},
			},
		}
	}
	for _, test := range []struct {
		desc string
		in   []*Mutation
		want []*pb.Mutation
	}{
		{
			desc: "nil",
			in:   nil,
			want: nil,
		},
		{
			desc: "empty",
			in:   []*Mutation{},
			want: nil,
		},
		{
			desc: "various",
			in: []*Mutation{
				NewInsert(keys[0], entity),
				NewUpsert(keys[1], entity),
				NewUpdate(keys[2], entity),
				NewDelete(keys[3]),
			},
			want: []*pb.Mutation{
				&pb.Mutation{Operation: &pb.Mutation_Insert{Insert: entityForKey(keys[0])}},
				&pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: entityForKey(keys[1])}},
				&pb.Mutation{Operation: &pb.Mutation_Update{Update: entityForKey(keys[2])}},
				&pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[3])}},
			},
		},
		{
			desc: "duplicate deletes",
			in: []*Mutation{
				NewDelete(keys[0]),
				NewInsert(keys[1], entity),
				NewDelete(keys[0]),
				NewDelete(keys[2]),
				NewDelete(keys[0]),
			},
			want: []*pb.Mutation{
				&pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[0])}},
				&pb.Mutation{Operation: &pb.Mutation_Insert{Insert: entityForKey(keys[1])}},
				&pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[2])}},
			},
		},
	} {
		got, err := mutationProtos(test.in)
		if err != nil {
			t.Errorf("%s: %v", test.desc, err)
			continue
		}
		if diff := testutil.Diff(got, test.want); diff != "" {
			t.Errorf("%s: %s", test.desc, diff)
		}
	}
}

func TestMutationProtosErrors(t *testing.T) {
	entity := &PropertyList{{Name: "n", Value: "v"}}
	k := IDKey("kind", 1, nil)
	ik := IncompleteKey("kind", nil)
	for _, test := range []struct {
		desc string
		in   []*Mutation
		want []int // non-nil indexes of MultiError
	}{
		{
			desc: "invalid key",
			in: []*Mutation{
				NewInsert(nil, entity),
				NewUpdate(nil, entity),
				NewUpsert(nil, entity),
				NewDelete(nil),
			},
			want: []int{0, 1, 2, 3},
		},
		{
			desc: "incomplete key",
			in: []*Mutation{
				NewInsert(ik, entity),
				NewUpdate(ik, entity),
				NewUpsert(ik, entity),
				NewDelete(ik),
			},
			want: []int{1, 3},
		},
		{
			desc: "bad entity",
			in: []*Mutation{
				NewInsert(k, 1),
				NewUpdate(k, 2),
				NewUpsert(k, 3),
			},
			want: []int{0, 1, 2},
		},
	} {
		_, err := mutationProtos(test.in)
		if err == nil {
			t.Errorf("%s: got nil, want error", test.desc)
			continue
		}
		var got []int
		for i, err := range err.(MultiError) {
			if err != nil {
				got = append(got, i)
			}
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%s: got errors at %v, want at %v", test.desc, got, test.want)
		}
	}
}
vendor/cloud.google.com/go/datastore/oc_test.go | 59 (generated, vendored, new file)

// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.8

package datastore

import (
	"testing"

	"go.opencensus.io/trace"
	"golang.org/x/net/context"
)

func TestOCTracing(t *testing.T) {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}

	te := &testExporter{}
	trace.RegisterExporter(te)
	defer trace.UnregisterExporter(te)
	trace.SetDefaultSampler(trace.AlwaysSample())

	ctx := context.Background()
	client := newTestClient(ctx, t)
	defer client.Close()

	type SomeValue struct {
		S string
	}
	_, err := client.Put(ctx, IncompleteKey("SomeKey", nil), &SomeValue{"foo"})
	if err != nil {
		t.Fatalf("client.Put: %v", err)
	}

	if len(te.spans) != 1 {
		t.Fatalf("Expected 1 span to be created, but got %d", len(te.spans))
	}
}

type testExporter struct {
	spans []*trace.SpanData
}

func (te *testExporter) ExportSpan(s *trace.SpanData) {
	te.spans = append(te.spans, s)
}
vendor/cloud.google.com/go/datastore/query.go | 21 (generated, vendored)

@@ -23,6 +23,7 @@ import (
 	"strconv"
 	"strings"
 
+	"cloud.google.com/go/internal/trace"
 	wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
@@ -445,7 +446,10 @@ func (q *Query) toProto(req *pb.RunQueryRequest) error {
 // with the sum of the query's offset and limit. Unless the result count is
 // expected to be small, it is best to specify a limit; otherwise Count will
 // continue until it finishes counting or the provided context expires.
-func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
+func (c *Client) Count(ctx context.Context, q *Query) (_ int, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Count")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	// Check that the query is well-formed.
 	if q.err != nil {
 		return 0, q.err
@@ -492,7 +496,10 @@ func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
 // expected to be small, it is best to specify a limit; otherwise GetAll will
 // continue until it finishes collecting results or the provided context
 // expires.
-func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) {
+func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) (_ []*Key, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.GetAll")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	var (
 		dv  reflect.Value
 		mat multiArgType
@@ -575,6 +582,9 @@ func (c *Client) Run(ctx context.Context, q *Query) *Iterator {
 			ProjectId: c.dataset,
 		},
 	}
+
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Run")
+	defer func() { trace.EndSpan(ctx, t.err) }()
 	if q.namespace != "" {
 		t.req.PartitionId = &pb.PartitionId{
 			NamespaceId: q.namespace,
@@ -622,7 +632,7 @@ type Iterator struct {
 // If the query is not keys only and dst is non-nil, it also loads the entity
 // stored for that key into the struct pointer or PropertyLoadSaver dst, with
 // the same semantics and possible errors as for the Get function.
-func (t *Iterator) Next(dst interface{}) (*Key, error) {
+func (t *Iterator) Next(dst interface{}) (_ *Key, err error) {
 	k, e, err := t.next()
 	if err != nil {
 		return nil, err
@@ -725,7 +735,10 @@ func (t *Iterator) nextBatch() error {
 }
 
 // Cursor returns a cursor for the iterator's current location.
-func (t *Iterator) Cursor() (Cursor, error) {
+func (t *Iterator) Cursor() (_ Cursor, err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Query.Cursor")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
 	// If there is still an offset, we need to the skip those results first.
 	for t.err == nil && t.offset > 0 {
 		t.err = t.nextBatch()
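Every change in this file follows one idiom: the function gains named return values so a deferred closure can observe the final error and attach it to the trace span. A self-contained sketch of the pattern, with stand-ins for the unexported cloud.google.com/go/internal/trace helpers:

package main

import (
	"errors"
	"fmt"
)

// startSpan and endSpan stand in for the internal trace helpers used above;
// they only print here so the sketch runs on its own.
func startSpan(name string) { fmt.Println("start", name) }

func endSpan(name string, err error) { fmt.Println("end", name, "err:", err) }

// count mirrors the rewritten Client.Count signature: the underscore keeps
// the positional result anonymous while naming err, so the deferred closure
// sees whatever error value is ultimately returned from any return path.
func count() (_ int, err error) {
	startSpan("Query.Count")
	defer func() { endSpan("Query.Count", err) }()

	err = errors.New("boom") // any assignment to err is visible to the defer
	return 0, err
}

func main() {
	if _, err := count(); err != nil {
		fmt.Println("count returned:", err)
	}
}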
vendor/cloud.google.com/go/datastore/save.go | 43 (generated, vendored)

@@ -88,9 +88,19 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect
 			return saveSliceProperty(props, name, opts, v)
 		}
 	case reflect.Ptr:
+		if isValidPointerType(v.Type().Elem()) {
+			if v.IsNil() {
+				// Nil pointer becomes a nil property value (unless omitempty, handled above).
+				p.Value = nil
+				*props = append(*props, p)
+				return nil
+			}
+			return saveStructProperty(props, name, opts, v.Elem())
+		}
 		if v.Type().Elem().Kind() != reflect.Struct {
 			return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type())
 		}
+		// Pointer to struct is a special case.
 		if v.IsNil() {
 			return nil
 		}
@@ -395,10 +405,18 @@ func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
 		// than the top-level value.
 		val.ExcludeFromIndexes = false
 	default:
-		if iv != nil {
-			return nil, fmt.Errorf("invalid Value type %t", iv)
+		rv := reflect.ValueOf(iv)
+		if !rv.IsValid() {
+			val.ValueType = &pb.Value_NullValue{}
+		} else if rv.Kind() == reflect.Ptr { // non-nil pointer: dereference
+			if rv.IsNil() {
+				val.ValueType = &pb.Value_NullValue{}
+				return val, nil
+			}
+			return interfaceToProto(rv.Elem().Interface(), noIndex)
+		} else {
+			return nil, fmt.Errorf("invalid Value type %T", iv)
 		}
-		val.ValueType = &pb.Value_NullValue{}
 	}
 	// TODO(jbd): Support EntityValue.
 	return val, nil
@@ -423,3 +441,22 @@ func isEmptyValue(v reflect.Value) bool {
 	}
 	return false
 }
+
+// isValidPointerType reports whether a struct field can be a pointer to type t
+// for the purposes of saving and loading.
+func isValidPointerType(t reflect.Type) bool {
+	if t == typeOfTime || t == typeOfGeoPoint {
+		return true
+	}
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	case reflect.Bool:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	}
+	return false
+}
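On the save side, the mirror-image rule now applies: a nil pointer field becomes a nil-valued property (or is dropped entirely under omitempty), while a non-nil pointer saves its pointee. A sketch using the package's exported SaveStruct (the Row type is invented for illustration):

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/datastore"
)

// Row is a made-up struct: Pi has no tag, Ps opts into omitempty.
type Row struct {
	Pi *int64
	Ps *string `datastore:",omitempty"`
}

func main() {
	// Per the saveStructProperty change above: nil Pi saves as a property
	// with a nil Value, while nil Ps is skipped because of omitempty.
	props, err := datastore.SaveStruct(&Row{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", props) // only Pi appears, with Value == nil
}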
vendor/cloud.google.com/go/datastore/save_test.go | 110 (generated, vendored)

@@ -16,22 +16,32 @@ package datastore
 
 import (
 	"testing"
+	"time"
 
 	"cloud.google.com/go/internal/testutil"
 
 	pb "google.golang.org/genproto/googleapis/datastore/v1"
 )
 
-func TestInterfaceToProtoNilKey(t *testing.T) {
-	var iv *Key
-	pv, err := interfaceToProto(iv, false)
-	if err != nil {
-		t.Fatalf("nil key: interfaceToProto: %v", err)
-	}
-
-	_, ok := pv.ValueType.(*pb.Value_NullValue)
-	if !ok {
-		t.Errorf("nil key: type:\ngot: %T\nwant: %T", pv.ValueType, &pb.Value_NullValue{})
+func TestInterfaceToProtoNil(t *testing.T) {
+	// A nil *Key, or a nil value of any other pointer type, should convert to a NullValue.
+	for _, in := range []interface{}{
+		(*Key)(nil),
+		(*int)(nil),
+		(*string)(nil),
+		(*bool)(nil),
+		(*float64)(nil),
+		(*GeoPoint)(nil),
+		(*time.Time)(nil),
+	} {
+		got, err := interfaceToProto(in, false)
+		if err != nil {
+			t.Fatalf("%T: %v", in, err)
+		}
+		_, ok := got.ValueType.(*pb.Value_NullValue)
+		if !ok {
+			t.Errorf("%T: got: %T\nwant: %T", in, got.ValueType, &pb.Value_NullValue{})
+		}
 	}
 }
 
@@ -193,3 +203,83 @@ func TestSaveEntityNested(t *testing.T) {
 		}
 	}
 }
+
+func TestSavePointers(t *testing.T) {
+	for _, test := range []struct {
+		desc string
+		in   interface{}
+		want []Property
+	}{
+		{
+			desc: "nil pointers save as nil-valued properties",
+			in:   &Pointers{},
+			want: []Property{
+				Property{Name: "Pi", Value: nil},
+				Property{Name: "Ps", Value: nil},
+				Property{Name: "Pb", Value: nil},
+				Property{Name: "Pf", Value: nil},
+				Property{Name: "Pg", Value: nil},
+				Property{Name: "Pt", Value: nil},
+			},
+		},
+		{
+			desc: "nil omitempty pointers not saved",
+			in:   &PointersOmitEmpty{},
+			want: []Property(nil),
+		},
+		{
+			desc: "non-nil zero-valued pointers save as zero values",
+			in:   populatedPointers(),
+			want: []Property{
+				Property{Name: "Pi", Value: int64(0)},
+				Property{Name: "Ps", Value: ""},
+				Property{Name: "Pb", Value: false},
+				Property{Name: "Pf", Value: 0.0},
+				Property{Name: "Pg", Value: GeoPoint{}},
+				Property{Name: "Pt", Value: time.Time{}},
+			},
+		},
+		{
+			desc: "non-nil non-zero-valued pointers save as the appropriate values",
+			in: func() *Pointers {
+				p := populatedPointers()
+				*p.Pi = 1
+				*p.Ps = "x"
+				*p.Pb = true
+				*p.Pf = 3.14
+				*p.Pg = GeoPoint{Lat: 1, Lng: 2}
+				*p.Pt = time.Unix(100, 0)
+				return p
+			}(),
+			want: []Property{
+				Property{Name: "Pi", Value: int64(1)},
+				Property{Name: "Ps", Value: "x"},
+				Property{Name: "Pb", Value: true},
+				Property{Name: "Pf", Value: 3.14},
+				Property{Name: "Pg", Value: GeoPoint{Lat: 1, Lng: 2}},
+				Property{Name: "Pt", Value: time.Unix(100, 0)},
+			},
+		},
+	} {
+		got, err := SaveStruct(test.in)
+		if err != nil {
+			t.Fatalf("%s: %v", test.desc, err)
+		}
+		if !testutil.Equal(got, test.want) {
+			t.Errorf("%s\ngot  %#v\nwant %#v\n", test.desc, got, test.want)
+		}
+	}
+}
+
+func TestSaveEmptySlice(t *testing.T) {
+	// Zero-length slice fields are not saved.
+	for _, slice := range [][]string{nil, {}} {
+		got, err := SaveStruct(&struct{ S []string }{S: slice})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(got) != 0 {
+			t.Errorf("%#v: got %d properties, wanted zero", slice, len(got))
+		}
+	}
+}
vendor/cloud.google.com/go/datastore/transaction.go | 130 (generated, vendored)

@@ -17,6 +17,7 @@ package datastore
 import (
 	"errors"
 
+	"cloud.google.com/go/internal/trace"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -32,6 +33,8 @@ var errExpiredTransaction = errors.New("datastore: transaction expired")
 
 type transactionSettings struct {
 	attempts int
+	readOnly bool
+	prevID   []byte // ID of the transaction to retry
 }
 
 // newTransactionSettings creates a transactionSettings with a given TransactionOption slice.
@@ -62,6 +65,19 @@ func (w maxAttempts) apply(s *transactionSettings) {
 	}
 }
 
+// ReadOnly is a TransactionOption that marks the transaction as read-only.
+var ReadOnly TransactionOption
+
+func init() {
+	ReadOnly = readOnly{}
+}
+
+type readOnly struct{}
+
+func (readOnly) apply(s *transactionSettings) {
+	s.readOnly = true
+}
+
 // Transaction represents a set of datastore operations to be committed atomically.
 //
 // Operations are enqueued by calling the Put and Delete methods on Transaction
@@ -80,20 +96,35 @@ type Transaction struct {
 }
 
 // NewTransaction starts a new transaction.
-func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) {
+func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (_ *Transaction, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.NewTransaction")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	for _, o := range opts {
 		if _, ok := o.(maxAttempts); ok {
 			return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option")
 		}
 	}
-	req := &pb.BeginTransactionRequest{
-		ProjectId: c.dataset,
+	return c.newTransaction(ctx, newTransactionSettings(opts))
+}
+
+func (c *Client) newTransaction(ctx context.Context, s *transactionSettings) (*Transaction, error) {
+	req := &pb.BeginTransactionRequest{ProjectId: c.dataset}
+	if s.readOnly {
+		req.TransactionOptions = &pb.TransactionOptions{
+			Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}},
+		}
+	} else if s.prevID != nil {
+		req.TransactionOptions = &pb.TransactionOptions{
+			Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{
+				PreviousTransaction: s.prevID,
+			}},
+		}
 	}
 	resp, err := c.client.BeginTransaction(ctx, req)
 	if err != nil {
 		return nil, err
 	}
 
 	return &Transaction{
 		id:  resp.Transaction,
 		ctx: ctx,
@@ -125,10 +156,13 @@ func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption)
 // is, it should have the same result when called multiple times. Note that
 // Transaction.Get will append when unmarshalling slice fields, so it is not
 // necessarily idempotent.
-func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) {
+func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (_ *Commit, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.RunInTransaction")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	settings := newTransactionSettings(opts)
 	for n := 0; n < settings.attempts; n++ {
-		tx, err := c.NewTransaction(ctx)
+		tx, err := c.newTransaction(ctx, settings)
 		if err != nil {
 			return nil, err
 		}
@@ -139,12 +173,20 @@ func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) e
 		if cmt, err := tx.Commit(); err != ErrConcurrentTransaction {
 			return cmt, err
 		}
+		// Pass this transaction's ID to the retry transaction to preserve
+		// transaction priority.
+		if !settings.readOnly {
+			settings.prevID = tx.id
+		}
 	}
 	return nil, ErrConcurrentTransaction
 }
 
 // Commit applies the enqueued operations atomically.
-func (t *Transaction) Commit() (*Commit, error) {
+func (t *Transaction) Commit() (_ *Commit, err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Commit")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
 	if t.id == nil {
 		return nil, errExpiredTransaction
 	}
@@ -181,13 +223,16 @@ func (t *Transaction) Commit() (*Commit, error) {
 }
 
 // Rollback abandons a pending transaction.
-func (t *Transaction) Rollback() error {
+func (t *Transaction) Rollback() (err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Rollback")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
 	if t.id == nil {
 		return errExpiredTransaction
 	}
 	id := t.id
 	t.id = nil
-	_, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{
+	_, err = t.client.client.Rollback(t.ctx, &pb.RollbackRequest{
 		ProjectId:   t.client.dataset,
 		Transaction: id,
 	})
@@ -199,11 +244,14 @@ func (t *Transaction) Rollback() error {
 // snapshot. Furthermore, if the transaction is set to a serializable isolation
 // level, another transaction cannot concurrently modify the data that is read
 // or modified by this transaction.
-func (t *Transaction) Get(key *Key, dst interface{}) error {
+func (t *Transaction) Get(key *Key, dst interface{}) (err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Get")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
 	opts := &pb.ReadOptions{
 		ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
 	}
-	err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
+	err = t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
 	if me, ok := err.(MultiError); ok {
 		return me[0]
 	}
@@ -211,7 +259,10 @@ func (t *Transaction) Get(key *Key, dst interface{}) error {
 }
 
 // GetMulti is a batch version of Get.
-func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error {
+func (t *Transaction) GetMulti(keys []*Key, dst interface{}) (err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.GetMulti")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
 	if t.id == nil {
 		return errExpiredTransaction
 	}
@@ -240,7 +291,11 @@ func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {
 
 // PutMulti is a batch version of Put. One PendingKey is returned for each
 // element of src in the same order.
-func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) {
+// TODO(jba): rewrite in terms of Mutate.
+func (t *Transaction) PutMulti(keys []*Key, src interface{}) (_ []*PendingKey, err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.PutMulti")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
 	if t.id == nil {
 		return nil, errExpiredTransaction
 	}
@@ -279,7 +334,11 @@ func (t *Transaction) Delete(key *Key) error {
 }
 
 // DeleteMulti is a batch version of Delete.
-func (t *Transaction) DeleteMulti(keys []*Key) error {
+// TODO(jba): rewrite in terms of Mutate.
+func (t *Transaction) DeleteMulti(keys []*Key) (err error) {
+	t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.DeleteMulti")
+	defer func() { trace.EndSpan(t.ctx, err) }()
+
 	if t.id == nil {
 		return errExpiredTransaction
 	}
@@ -291,12 +350,53 @@ func (t *Transaction) DeleteMulti(keys []*Key) error {
 	return nil
 }
 
+// Mutate adds the mutations to the transaction. They will all be applied atomically
+// upon calling Commit. Mutate returns a PendingKey for each Mutation in the argument
+// list, in the same order. PendingKeys for Delete mutations are always nil.
+//
+// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
+// Mutate returns a MultiError in this case even if there is only one Mutation.
+//
+// For an example, see Client.Mutate.
+func (t *Transaction) Mutate(muts ...*Mutation) ([]*PendingKey, error) {
+	if t.id == nil {
+		return nil, errExpiredTransaction
+	}
+	pmuts, err := mutationProtos(muts)
+	if err != nil {
+		return nil, err
+	}
+	origin := len(t.mutations)
+	t.mutations = append(t.mutations, pmuts...)
+	// Prepare the returned handles, pre-populating where possible.
+	ret := make([]*PendingKey, len(muts))
+	for i, mut := range muts {
+		if mut.isDelete() {
+			continue
+		}
+		p := &PendingKey{}
+		if mut.key.Incomplete() {
+			// This key will be in the final commit result.
+			t.pending[origin+i] = p
+		} else {
+			p.key = mut.key
+		}
+		ret[i] = p
+	}
+	return ret, nil
+}
+
 // Commit represents the result of a committed transaction.
 type Commit struct{}
 
 // Key resolves a pending key handle into a final key.
 func (c *Commit) Key(p *PendingKey) *Key {
-	if c != p.commit {
+	if p == nil { // if called on a *PendingKey from a Delete mutation
+		return nil
+	}
+	// If p.commit is nil, the PendingKey did not come from an incomplete key,
+	// so p.key is valid.
+	if p.commit != nil && c != p.commit {
 		panic("PendingKey was not created by corresponding transaction")
 	}
 	return p.key
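Putting the transaction changes together: the new ReadOnly option is passed to RunInTransaction like any other TransactionOption, and, per the integration test at the top of this section, a write inside such a transaction fails. A minimal sketch, assuming the vendored package at this revision; the project ID, kind, and key name are placeholders.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/datastore"
)

// Counter is a made-up entity kind used only for this sketch.
type Counter struct {
	N int
}

func main() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var c Counter
	key := datastore.NameKey("Counter", "singleton", nil) // invented key
	// ReadOnly (added above) makes the server begin a read-only transaction;
	// any Put enqueued inside it is rejected, as the integration test shows.
	_, err = client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
		return tx.Get(key, &c)
	}, datastore.ReadOnly)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(c.N)
}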
vendor/cloud.google.com/go/datastore/transaction_test.go | 78 (generated, vendored, new file)

// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"testing"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"

	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

func TestNewTransaction(t *testing.T) {
	var got *pb.BeginTransactionRequest
	client := &Client{
		dataset: "project",
		client: &fakeDatastoreClient{
			beginTransaction: func(req *pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) {
				got = req
				return &pb.BeginTransactionResponse{
					Transaction: []byte("tid"),
				}, nil
			},
		},
	}
	ctx := context.Background()
	for _, test := range []struct {
		settings *transactionSettings
		want     *pb.BeginTransactionRequest
	}{
		{
			&transactionSettings{},
			&pb.BeginTransactionRequest{ProjectId: "project"},
		},
		{
			&transactionSettings{readOnly: true},
			&pb.BeginTransactionRequest{
				ProjectId: "project",
				TransactionOptions: &pb.TransactionOptions{
					Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}},
				},
			},
		},
		{
			&transactionSettings{prevID: []byte("tid")},
			&pb.BeginTransactionRequest{
				ProjectId: "project",
				TransactionOptions: &pb.TransactionOptions{
					Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{
						PreviousTransaction: []byte("tid"),
					}},
				},
			},
		},
	} {
		_, err := client.newTransaction(ctx, test.settings)
		if err != nil {
			t.Fatal(err)
		}
		if !proto.Equal(got, test.want) {
			t.Errorf("%+v:\ngot  %+v\nwant %+v", test.settings, got, test.want)
		}
	}
}
vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go | 810 (generated, vendored, new file)

// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package dlp

import (
	"math"
	"time"

	"cloud.google.com/go/internal/version"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
	InspectContent           []gax.CallOption
	RedactImage              []gax.CallOption
	DeidentifyContent        []gax.CallOption
	ReidentifyContent        []gax.CallOption
	ListInfoTypes            []gax.CallOption
	CreateInspectTemplate    []gax.CallOption
	UpdateInspectTemplate    []gax.CallOption
	GetInspectTemplate       []gax.CallOption
	ListInspectTemplates     []gax.CallOption
	DeleteInspectTemplate    []gax.CallOption
	CreateDeidentifyTemplate []gax.CallOption
	UpdateDeidentifyTemplate []gax.CallOption
	GetDeidentifyTemplate    []gax.CallOption
	ListDeidentifyTemplates  []gax.CallOption
	DeleteDeidentifyTemplate []gax.CallOption
	CreateDlpJob             []gax.CallOption
	ListDlpJobs              []gax.CallOption
	GetDlpJob                []gax.CallOption
	DeleteDlpJob             []gax.CallOption
	CancelDlpJob             []gax.CallOption
	ListJobTriggers          []gax.CallOption
	GetJobTrigger            []gax.CallOption
	DeleteJobTrigger         []gax.CallOption
	UpdateJobTrigger         []gax.CallOption
	CreateJobTrigger         []gax.CallOption
}

func defaultClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("dlp.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}

func defaultCallOptions() *CallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		InspectContent:           retry[[2]string{"default", "idempotent"}],
		RedactImage:              retry[[2]string{"default", "idempotent"}],
		DeidentifyContent:        retry[[2]string{"default", "idempotent"}],
		ReidentifyContent:        retry[[2]string{"default", "idempotent"}],
		ListInfoTypes:            retry[[2]string{"default", "idempotent"}],
		CreateInspectTemplate:    retry[[2]string{"default", "non_idempotent"}],
		UpdateInspectTemplate:    retry[[2]string{"default", "non_idempotent"}],
		GetInspectTemplate:       retry[[2]string{"default", "idempotent"}],
		ListInspectTemplates:     retry[[2]string{"default", "idempotent"}],
		DeleteInspectTemplate:    retry[[2]string{"default", "idempotent"}],
		CreateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}],
		UpdateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}],
		GetDeidentifyTemplate:    retry[[2]string{"default", "idempotent"}],
		ListDeidentifyTemplates:  retry[[2]string{"default", "idempotent"}],
		DeleteDeidentifyTemplate: retry[[2]string{"default", "idempotent"}],
		CreateDlpJob:             retry[[2]string{"default", "non_idempotent"}],
		ListDlpJobs:              retry[[2]string{"default", "idempotent"}],
		GetDlpJob:                retry[[2]string{"default", "idempotent"}],
		DeleteDlpJob:             retry[[2]string{"default", "idempotent"}],
		CancelDlpJob:             retry[[2]string{"default", "non_idempotent"}],
		ListJobTriggers:          retry[[2]string{"default", "idempotent"}],
		GetJobTrigger:            retry[[2]string{"default", "idempotent"}],
		DeleteJobTrigger:         retry[[2]string{"default", "idempotent"}],
		UpdateJobTrigger:         retry[[2]string{"default", "non_idempotent"}],
		CreateJobTrigger:         retry[[2]string{"default", "non_idempotent"}],
	}
}

// Client is a client for interacting with DLP API.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client dlppb.DlpServiceClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewClient creates a new dlp service client.
//
// The DLP API is a service that allows clients
// to detect the presence of Personally Identifiable Information (PII) and other
// privacy-sensitive data in user-supplied, unstructured data streams, like text
// blocks or images.
// The service also includes methods for sensitive data redaction and
// scheduling of data scans on Google Cloud Platform based data sets.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: dlppb.NewDlpServiceClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}

// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||||
|
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||||
|
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// InspectContent finds potentially sensitive info in content.
|
||||||
|
// This method has limits on input size, processing time, and output size.
|
||||||
|
// How-to guide for text (at /dlp/docs/inspecting-text), How-to guide for
|
||||||
|
// images (at /dlp/docs/inspecting-images)
|
||||||
|
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
|
||||||
|
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||||
|
opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
|
||||||
|
var resp *dlppb.InspectContentResponse
|
||||||
|
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||||
|
var err error
|
||||||
|
resp, err = c.client.InspectContent(ctx, req, settings.GRPC...)
|
||||||
|
return err
|
||||||
|
}, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedactImage redacts potentially sensitive info from an image.
|
||||||
|
// This method has limits on input size, processing time, and output size.
|
||||||
|
// How-to guide (at /dlp/docs/redacting-sensitive-data-images)
|
||||||
|
func (c *Client) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest, opts ...gax.CallOption) (*dlppb.RedactImageResponse, error) {
|
||||||
|
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||||
|
opts = append(c.CallOptions.RedactImage[0:len(c.CallOptions.RedactImage):len(c.CallOptions.RedactImage)], opts...)
|
||||||
|
var resp *dlppb.RedactImageResponse
|
||||||
|
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||||
|
var err error
|
||||||
|
resp, err = c.client.RedactImage(ctx, req, settings.GRPC...)
|
||||||
|
return err
|
||||||
|
}, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeidentifyContent de-identifies potentially sensitive info from a ContentItem.
|
||||||
|
// This method has limits on input size and output size.
|
||||||
|
// How-to guide (at /dlp/docs/deidentify-sensitive-data)
|
||||||
|
func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
|
||||||
|
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||||
|
opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
|
||||||
|
var resp *dlppb.DeidentifyContentResponse
|
||||||
|
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||||
|
var err error
|
||||||
|
resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
|
||||||
|
return err
|
||||||
|
}, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReidentifyContent re-identifies content that has been de-identified.
func (c *Client) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest, opts ...gax.CallOption) (*dlppb.ReidentifyContentResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ReidentifyContent[0:len(c.CallOptions.ReidentifyContent):len(c.CallOptions.ReidentifyContent)], opts...)
	var resp *dlppb.ReidentifyContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ReidentifyContent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ListInfoTypes returns sensitive information types DLP supports.
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
	var resp *dlppb.ListInfoTypesResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// CreateInspectTemplate creates an inspect template for re-using frequently used configuration
// for inspecting content, images, and storage.
func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CreateInspectTemplate[0:len(c.CallOptions.CreateInspectTemplate):len(c.CallOptions.CreateInspectTemplate)], opts...)
	var resp *dlppb.InspectTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateInspectTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateInspectTemplate updates the inspect template.
func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.UpdateInspectTemplate[0:len(c.CallOptions.UpdateInspectTemplate):len(c.CallOptions.UpdateInspectTemplate)], opts...)
	var resp *dlppb.InspectTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateInspectTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// GetInspectTemplate gets an inspect template.
func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetInspectTemplate[0:len(c.CallOptions.GetInspectTemplate):len(c.CallOptions.GetInspectTemplate)], opts...)
	var resp *dlppb.InspectTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetInspectTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ListInspectTemplates lists inspect templates.
func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest, opts ...gax.CallOption) *InspectTemplateIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListInspectTemplates[0:len(c.CallOptions.ListInspectTemplates):len(c.CallOptions.ListInspectTemplates)], opts...)
	it := &InspectTemplateIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.InspectTemplate, string, error) {
		var resp *dlppb.ListInspectTemplatesResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListInspectTemplates(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.InspectTemplates, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// DeleteInspectTemplate deletes an inspect template.
func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteInspectTemplate[0:len(c.CallOptions.DeleteInspectTemplate):len(c.CallOptions.DeleteInspectTemplate)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteInspectTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// CreateDeidentifyTemplate creates a de-identify template for re-using frequently used configuration
// for de-identifying content, images, and storage.
func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CreateDeidentifyTemplate[0:len(c.CallOptions.CreateDeidentifyTemplate):len(c.CallOptions.CreateDeidentifyTemplate)], opts...)
	var resp *dlppb.DeidentifyTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateDeidentifyTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateDeidentifyTemplate updates the de-identify template.
func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.UpdateDeidentifyTemplate[0:len(c.CallOptions.UpdateDeidentifyTemplate):len(c.CallOptions.UpdateDeidentifyTemplate)], opts...)
	var resp *dlppb.DeidentifyTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateDeidentifyTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// GetDeidentifyTemplate gets a de-identify template.
func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetDeidentifyTemplate[0:len(c.CallOptions.GetDeidentifyTemplate):len(c.CallOptions.GetDeidentifyTemplate)], opts...)
	var resp *dlppb.DeidentifyTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDeidentifyTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ListDeidentifyTemplates lists de-identify templates.
func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest, opts ...gax.CallOption) *DeidentifyTemplateIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListDeidentifyTemplates[0:len(c.CallOptions.ListDeidentifyTemplates):len(c.CallOptions.ListDeidentifyTemplates)], opts...)
	it := &DeidentifyTemplateIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DeidentifyTemplate, string, error) {
		var resp *dlppb.ListDeidentifyTemplatesResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDeidentifyTemplates(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.DeidentifyTemplates, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// DeleteDeidentifyTemplate deletes a de-identify template.
func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteDeidentifyTemplate[0:len(c.CallOptions.DeleteDeidentifyTemplate):len(c.CallOptions.DeleteDeidentifyTemplate)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteDeidentifyTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// CreateDlpJob creates a new job to inspect storage or calculate risk metrics. How-to
// guide (at /dlp/docs/compute-risk-analysis).
func (c *Client) CreateDlpJob(ctx context.Context, req *dlppb.CreateDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CreateDlpJob[0:len(c.CallOptions.CreateDlpJob):len(c.CallOptions.CreateDlpJob)], opts...)
	var resp *dlppb.DlpJob
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateDlpJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ListDlpJobs lists DlpJobs that match the specified filter in the request.
func (c *Client) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest, opts ...gax.CallOption) *DlpJobIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListDlpJobs[0:len(c.CallOptions.ListDlpJobs):len(c.CallOptions.ListDlpJobs)], opts...)
	it := &DlpJobIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DlpJob, string, error) {
		var resp *dlppb.ListDlpJobsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDlpJobs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Jobs, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// GetDlpJob gets the latest state of a long-running DlpJob.
func (c *Client) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetDlpJob[0:len(c.CallOptions.GetDlpJob):len(c.CallOptions.GetDlpJob)], opts...)
	var resp *dlppb.DlpJob
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDlpJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteDlpJob deletes a long-running DlpJob. This method indicates that the client is
// no longer interested in the DlpJob result. The job will be cancelled if
// possible.
func (c *Client) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteDlpJob[0:len(c.CallOptions.DeleteDlpJob):len(c.CallOptions.DeleteDlpJob)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteDlpJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// CancelDlpJob starts asynchronous cancellation on a long-running DlpJob. The server
// makes a best effort to cancel the DlpJob, but success is not
// guaranteed.
func (c *Client) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CancelDlpJob[0:len(c.CallOptions.CancelDlpJob):len(c.CallOptions.CancelDlpJob)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.CancelDlpJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// ListJobTriggers lists job triggers.
func (c *Client) ListJobTriggers(ctx context.Context, req *dlppb.ListJobTriggersRequest, opts ...gax.CallOption) *JobTriggerIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListJobTriggers[0:len(c.CallOptions.ListJobTriggers):len(c.CallOptions.ListJobTriggers)], opts...)
	it := &JobTriggerIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.JobTrigger, string, error) {
		var resp *dlppb.ListJobTriggersResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListJobTriggers(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.JobTriggers, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// GetJobTrigger gets a job trigger.
func (c *Client) GetJobTrigger(ctx context.Context, req *dlppb.GetJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetJobTrigger[0:len(c.CallOptions.GetJobTrigger):len(c.CallOptions.GetJobTrigger)], opts...)
	var resp *dlppb.JobTrigger
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetJobTrigger(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteJobTrigger deletes a job trigger.
func (c *Client) DeleteJobTrigger(ctx context.Context, req *dlppb.DeleteJobTriggerRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteJobTrigger[0:len(c.CallOptions.DeleteJobTrigger):len(c.CallOptions.DeleteJobTrigger)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteJobTrigger(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// UpdateJobTrigger updates a job trigger.
func (c *Client) UpdateJobTrigger(ctx context.Context, req *dlppb.UpdateJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.UpdateJobTrigger[0:len(c.CallOptions.UpdateJobTrigger):len(c.CallOptions.UpdateJobTrigger)], opts...)
	var resp *dlppb.JobTrigger
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateJobTrigger(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// CreateJobTrigger creates a job to run DLP actions such as scanning storage for sensitive
// information on a set schedule.
func (c *Client) CreateJobTrigger(ctx context.Context, req *dlppb.CreateJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CreateJobTrigger[0:len(c.CallOptions.CreateJobTrigger):len(c.CallOptions.CreateJobTrigger)], opts...)
	var resp *dlppb.JobTrigger
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateJobTrigger(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeidentifyTemplateIterator manages a stream of *dlppb.DeidentifyTemplate.
type DeidentifyTemplateIterator struct {
	items    []*dlppb.DeidentifyTemplate
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DeidentifyTemplate, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DeidentifyTemplateIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DeidentifyTemplateIterator) Next() (*dlppb.DeidentifyTemplate, error) {
	var item *dlppb.DeidentifyTemplate
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *DeidentifyTemplateIterator) bufLen() int {
	return len(it.items)
}

func (it *DeidentifyTemplateIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// DlpJobIterator manages a stream of *dlppb.DlpJob.
type DlpJobIterator struct {
	items    []*dlppb.DlpJob
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DlpJob, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DlpJobIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DlpJobIterator) Next() (*dlppb.DlpJob, error) {
	var item *dlppb.DlpJob
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *DlpJobIterator) bufLen() int {
	return len(it.items)
}

func (it *DlpJobIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// InspectTemplateIterator manages a stream of *dlppb.InspectTemplate.
type InspectTemplateIterator struct {
	items    []*dlppb.InspectTemplate
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.InspectTemplate, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *InspectTemplateIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *InspectTemplateIterator) Next() (*dlppb.InspectTemplate, error) {
	var item *dlppb.InspectTemplate
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *InspectTemplateIterator) bufLen() int {
	return len(it.items)
}

func (it *InspectTemplateIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// JobTriggerIterator manages a stream of *dlppb.JobTrigger.
type JobTriggerIterator struct {
	items    []*dlppb.JobTrigger
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.JobTrigger, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *JobTriggerIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *JobTriggerIterator) Next() (*dlppb.JobTrigger, error) {
	var item *dlppb.JobTrigger
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *JobTriggerIterator) bufLen() int {
	return len(it.items)
}

func (it *JobTriggerIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
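The defaults above retry only DeadlineExceeded and Unavailable, backing off from 100ms toward 60s at 1.3x. Because CallOptions is an exported field, a caller can swap in its own policy per method after NewClient. The following is a minimal sketch, not part of the vendored diff; the backoff numbers are illustrative assumptions:

package main

import (
	"time"

	dlp "cloud.google.com/go/dlp/apiv2"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
)

func main() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer c.Close()

	// Replace the generated default for InspectContent only: retry just
	// Unavailable, with a faster initial backoff (illustrative values).
	c.CallOptions.InspectContent = []gax.CallOption{
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
				Initial:    50 * time.Millisecond,
				Max:        10 * time.Second,
				Multiplier: 2,
			})
		}),
	}
}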
498 vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go generated vendored Normal file
@@ -0,0 +1,498 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package dlp_test

import (
	"cloud.google.com/go/dlp/apiv2"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2"
)

func ExampleNewClient() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}

func ExampleClient_InspectContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.InspectContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.InspectContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_RedactImage() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.RedactImageRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RedactImage(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_DeidentifyContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.DeidentifyContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeidentifyContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ReidentifyContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ReidentifyContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ReidentifyContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListInfoTypes() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListInfoTypesRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListInfoTypes(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_CreateInspectTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.CreateInspectTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateInspectTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_UpdateInspectTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.UpdateInspectTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateInspectTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_GetInspectTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.GetInspectTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetInspectTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListInspectTemplates() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListInspectTemplatesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListInspectTemplates(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_DeleteInspectTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.DeleteInspectTemplateRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteInspectTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_CreateDeidentifyTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.CreateDeidentifyTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateDeidentifyTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_UpdateDeidentifyTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.UpdateDeidentifyTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateDeidentifyTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_GetDeidentifyTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.GetDeidentifyTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDeidentifyTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListDeidentifyTemplates() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListDeidentifyTemplatesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDeidentifyTemplates(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_DeleteDeidentifyTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.DeleteDeidentifyTemplateRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteDeidentifyTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_CreateDlpJob() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.CreateDlpJobRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateDlpJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListDlpJobs() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListDlpJobsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDlpJobs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_GetDlpJob() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.GetDlpJobRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDlpJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_DeleteDlpJob() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.DeleteDlpJobRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteDlpJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_CancelDlpJob() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.CancelDlpJobRequest{
		// TODO: Fill request struct fields.
	}
	err = c.CancelDlpJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_ListJobTriggers() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListJobTriggersRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListJobTriggers(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_GetJobTrigger() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.GetJobTriggerRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetJobTrigger(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_DeleteJobTrigger() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.DeleteJobTriggerRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteJobTrigger(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_UpdateJobTrigger() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.UpdateJobTriggerRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateJobTrigger(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_CreateJobTrigger() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.CreateJobTriggerRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateJobTrigger(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
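The examples above drain each list iterator item by item with Next and iterator.Done. As a hedged alternative sketch (not part of the diff): the generated iterators expose PageInfo, so they satisfy the Pageable interface in google.golang.org/api/iterator, and NewPager can fetch a page at a time instead. The page size and empty start token below are assumed values:

package main

import (
	dlp "cloud.google.com/go/dlp/apiv2"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2"
)

func main() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	it := c.ListDlpJobs(ctx, &dlppb.ListDlpJobsRequest{
		// TODO: Fill request struct fields.
	})
	// 50 items per page, starting from the first page (assumed values).
	p := iterator.NewPager(it, 50, "")
	for {
		var jobs []*dlppb.DlpJob
		nextTok, err := p.NextPage(&jobs)
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use jobs.
		_ = jobs
		if nextTok == "" {
			break
		}
	}
}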
45 vendor/cloud.google.com/go/dlp/apiv2/doc.go generated vendored Normal file
@@ -0,0 +1,45 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package dlp is an auto-generated package for the
// DLP API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
package dlp // import "cloud.google.com/go/dlp/apiv2"

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	return []string{
		"https://www.googleapis.com/auth/cloud-platform",
	}
}
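insertMetadata above merges the client's x-goog-api-client pair into whatever metadata is already on the outgoing context instead of overwriting it. A small self-contained sketch of that merge behavior using the standard grpc metadata API; the header key and value shown are illustrative assumptions:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := context.Background()
	// A caller-supplied pair; a merge like insertMetadata appends the
	// client's own pairs on top without dropping this one.
	ctx = metadata.AppendToOutgoingContext(ctx, "x-sample-key", "sample-value")
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md) // map[x-sample-key:[sample-value]]
}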
1902 vendor/cloud.google.com/go/dlp/apiv2/mock_test.go generated vendored Normal file
File diff suppressed because it is too large
4 vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go generated vendored
@@ -69,13 +69,13 @@ func TestReportErrorsServiceSmoke(t *testing.T) {
 		LineNumber:   lineNumber,
 		FunctionName: functionName,
 	}
-	var context = &clouderrorreportingpb.ErrorContext{
+	var context_ = &clouderrorreportingpb.ErrorContext{
 		ReportLocation: reportLocation,
 	}
 	var event = &clouderrorreportingpb.ReportedErrorEvent{
 		Message:        message,
 		ServiceContext: serviceContext,
-		Context:        context,
+		Context:        context_,
 	}
 	var request = &clouderrorreportingpb.ReportErrorEventRequest{
 		ProjectName: formattedProjectName,
13 vendor/cloud.google.com/go/firestore/Makefile generated vendored Normal file
@@ -0,0 +1,13 @@
# Copy textproto files in this directory from the source of truth.

SRC=$(GOPATH)/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore

.PHONY: refresh-tests

refresh-tests:
	-rm genproto/*.pb.go
	cp $(SRC)/genproto/*.pb.go genproto
	-rm testdata/*.textproto
	cp $(SRC)/testdata/*.textproto testdata
	openssl dgst -sha1 $(SRC)/testdata/test-suite.binproto > testdata/VERSION
29 vendor/cloud.google.com/go/firestore/client.go generated vendored

@@ -29,6 +29,7 @@ import (
 	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
 
 	"github.com/golang/protobuf/ptypes"
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
 	"golang.org/x/net/context"
 	"google.golang.org/api/option"
 	"google.golang.org/grpc/metadata"
@@ -128,6 +129,10 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen
 	if err := checkTransaction(ctx); err != nil {
 		return nil, err
 	}
+	return c.getAll(ctx, docRefs, nil)
+}
+
+func (c *Client) getAll(ctx context.Context, docRefs []*DocumentRef, tid []byte) ([]*DocumentSnapshot, error) {
 	var docNames []string
 	for _, dr := range docRefs {
 		if dr == nil {
@@ -139,13 +144,21 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen
 		Database:  c.path(),
 		Documents: docNames,
 	}
+	if tid != nil {
+		req.ConsistencySelector = &pb.BatchGetDocumentsRequest_Transaction{tid}
+	}
 	streamClient, err := c.c.BatchGetDocuments(withResourceHeader(ctx, req.Database), req)
 	if err != nil {
 		return nil, err
 	}
 
 	// Read results from the stream and add them to a map.
-	docMap := map[string]*pb.Document{}
+	type result struct {
+		doc      *pb.Document
+		readTime *tspb.Timestamp
+	}
+
+	docMap := map[string]result{}
 	for {
 		res, err := streamClient.Recv()
 		if err == io.EOF {
@@ -156,13 +169,13 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen
 		}
 		switch x := res.Result.(type) {
 		case *pb.BatchGetDocumentsResponse_Found:
-			docMap[x.Found.Name] = x.Found
+			docMap[x.Found.Name] = result{x.Found, res.ReadTime}
 
 		case *pb.BatchGetDocumentsResponse_Missing:
-			if docMap[x.Missing] != nil {
-				return nil, fmt.Errorf("firestore: %q both missing and present", x.Missing)
+			if _, ok := docMap[x.Missing]; ok {
+				return nil, fmt.Errorf("firestore: %q seen twice", x.Missing)
 			}
-			docMap[x.Missing] = nil
+			docMap[x.Missing] = result{nil, res.ReadTime}
 		default:
 			return nil, errors.New("firestore: unknown BatchGetDocumentsResponse result type")
 		}
@@ -172,12 +185,12 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen
 	// DocumentRefs.
 	docs := make([]*DocumentSnapshot, len(docNames))
 	for i, name := range docNames {
-		pbDoc, ok := docMap[name]
+		r, ok := docMap[name]
 		if !ok {
 			return nil, fmt.Errorf("firestore: passed %q to BatchGetDocuments but never saw response", name)
 		}
-		if pbDoc != nil {
-			doc, err := newDocumentSnapshot(docRefs[i], pbDoc, c)
+		if r.doc != nil {
+			doc, err := newDocumentSnapshot(docRefs[i], r.doc, c, r.readTime)
 			if err != nil {
 				return nil, err
 			}
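Aside (not part of the commit): with this refactor a single BatchGetDocuments stream now serves both GetAll and transactional reads. A minimal usage sketch of the public surface; the project ID and document paths are placeholders:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/firestore"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	refs := []*firestore.DocumentRef{client.Doc("C/a"), client.Doc("C/b")}
	snaps, err := client.GetAll(ctx, refs) // one BatchGetDocuments round trip
	if err != nil {
		log.Fatal(err)
	}
	for _, snap := range snaps {
		if snap == nil {
			continue // at this version a missing document yields a nil snapshot
		}
		fmt.Println(snap.Ref.ID, snap.Data())
	}
}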
52 vendor/cloud.google.com/go/firestore/client_test.go generated vendored

@@ -17,10 +17,11 @@ package firestore
 import (
 	"testing"
 
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
 	"golang.org/x/net/context"
 	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
-	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )
 
 var testClient = &Client{
@@ -92,10 +93,23 @@ func TestClientCollDocErrors(t *testing.T) {
 }
 
 func TestGetAll(t *testing.T) {
-	ctx := context.Background()
-	const dbPath = "projects/projectID/databases/(default)"
 	c, srv := newMock(t)
 	defer c.Close()
+	const dbPath = "projects/projectID/databases/(default)"
+	req := &pb.BatchGetDocumentsRequest{
+		Database: dbPath,
+		Documents: []string{
+			dbPath + "/documents/C/a",
+			dbPath + "/documents/C/b",
+			dbPath + "/documents/C/c",
+		},
+	}
+	testGetAll(t, c, srv, dbPath, func(drs []*DocumentRef) ([]*DocumentSnapshot, error) {
+		return c.GetAll(context.Background(), drs)
+	}, req)
+}
+
+func testGetAll(t *testing.T, c *Client, srv *mockServer, dbPath string, getAll func([]*DocumentRef) ([]*DocumentSnapshot, error), req *pb.BatchGetDocumentsRequest) {
 	wantPBDocs := []*pb.Document{
 		{
 			Name: dbPath + "/documents/C/a",
@@ -111,25 +125,21 @@ func TestGetAll(t *testing.T) {
 			Fields: map[string]*pb.Value{"f": intval(1)},
 		},
 	}
-	srv.addRPC(
-		&pb.BatchGetDocumentsRequest{
-			Database: dbPath,
-			Documents: []string{
-				dbPath + "/documents/C/a",
-				dbPath + "/documents/C/b",
-				dbPath + "/documents/C/c",
-			},
-		},
+	wantReadTimes := []*tspb.Timestamp{aTimestamp, aTimestamp2, aTimestamp3}
+	srv.addRPC(req,
 		[]interface{}{
 			// deliberately put these out of order
 			&pb.BatchGetDocumentsResponse{
 				Result:   &pb.BatchGetDocumentsResponse_Found{wantPBDocs[2]},
+				ReadTime: aTimestamp3,
 			},
 			&pb.BatchGetDocumentsResponse{
 				Result:   &pb.BatchGetDocumentsResponse_Found{wantPBDocs[0]},
+				ReadTime: aTimestamp,
 			},
 			&pb.BatchGetDocumentsResponse{
 				Result:   &pb.BatchGetDocumentsResponse_Missing{dbPath + "/documents/C/b"},
+				ReadTime: aTimestamp2,
 			},
 		},
 	)
@@ -138,7 +148,7 @@ func TestGetAll(t *testing.T) {
 	for _, name := range []string{"a", "b", "c"} {
 		docRefs = append(docRefs, coll.Doc(name))
 	}
-	docs, err := c.GetAll(ctx, docRefs)
+	docs, err := getAll(docRefs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -148,7 +158,7 @@ func TestGetAll(t *testing.T) {
 	for i, got := range docs {
 		var want *DocumentSnapshot
 		if wantPBDocs[i] != nil {
-			want, err = newDocumentSnapshot(docRefs[i], wantPBDocs[i], c)
+			want, err = newDocumentSnapshot(docRefs[i], wantPBDocs[i], c, wantReadTimes[i])
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -176,7 +186,7 @@ func TestGetAllErrors(t *testing.T) {
 			Database:  dbPath,
 			Documents: []string{docPath},
 		},
-		[]interface{}{grpc.Errorf(codes.Internal, "")},
+		[]interface{}{status.Errorf(codes.Internal, "")},
 	)
 	_, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")})
 	codeEq(t, "GetAll #1", codes.Internal, err)
@@ -190,10 +200,12 @@ func TestGetAllErrors(t *testing.T) {
 		},
 		[]interface{}{
 			&pb.BatchGetDocumentsResponse{
 				Result:   &pb.BatchGetDocumentsResponse_Found{&pb.Document{Name: docPath}},
+				ReadTime: aTimestamp,
 			},
 			&pb.BatchGetDocumentsResponse{
 				Result:   &pb.BatchGetDocumentsResponse_Missing{docPath},
+				ReadTime: aTimestamp,
 			},
 		},
 	)
136 vendor/cloud.google.com/go/firestore/cross_language_test.go generated vendored

@@ -18,6 +18,7 @@ package firestore
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"path"
@@ -63,22 +64,34 @@ func runTestFromFile(t *testing.T, filename string) {
 }
 
 func runTest(t *testing.T, msg string, test *pb.Test) {
-	check := func(gotErr error, wantErr bool) {
+	check := func(gotErr error, wantErr bool) bool {
 		if wantErr && gotErr == nil {
 			t.Errorf("%s: got nil, want error", msg)
+			return false
 		} else if !wantErr && gotErr != nil {
 			t.Errorf("%s: %v", msg, gotErr)
+			return false
 		}
+		return true
 	}
 
 	ctx := context.Background()
 	c, srv := newMock(t)
 
 	switch tt := test.Test.(type) {
 	case *pb.Test_Get:
-		srv.addRPC(tt.Get.Request, &fspb.Document{
-			CreateTime: &ts.Timestamp{},
-			UpdateTime: &ts.Timestamp{},
+		req := &fspb.BatchGetDocumentsRequest{
+			Database:  c.path(),
+			Documents: []string{tt.Get.DocRefPath},
+		}
+		srv.addRPC(req, []interface{}{
+			&fspb.BatchGetDocumentsResponse{
+				Result: &fspb.BatchGetDocumentsResponse_Found{&fspb.Document{
+					Name:       tt.Get.DocRefPath,
+					CreateTime: &ts.Timestamp{},
+					UpdateTime: &ts.Timestamp{},
+				}},
+				ReadTime: &ts.Timestamp{},
+			},
 		})
 		ref := docRefFromPath(tt.Get.DocRefPath, c)
 		_, err := ref.Get(ctx)
@@ -126,14 +139,13 @@ func runTest(t *testing.T, msg string, test *pb.Test) {
 		paths := convertFieldPaths(tt.UpdatePaths.FieldPaths)
 		var ups []Update
 		for i, path := range paths {
-			jsonValue := tt.UpdatePaths.JsonValues[i]
-			var val interface{}
-			if err := json.Unmarshal([]byte(jsonValue), &val); err != nil {
-				t.Fatalf("%s: %q: %v", msg, jsonValue, err)
+			val, err := convertJSONValue(tt.UpdatePaths.JsonValues[i])
+			if err != nil {
+				t.Fatalf("%s: %v", msg, err)
 			}
 			ups = append(ups, Update{
 				FieldPath: path,
-				Value:     convertTestValue(val),
+				Value:     val,
 			})
 		}
 		_, err := ref.Update(ctx, ups, preconds...)
@@ -146,6 +158,15 @@ func runTest(t *testing.T, msg string, test *pb.Test) {
 		_, err := ref.Delete(ctx, preconds...)
 		check(err, tt.Delete.IsError)
 
+	case *pb.Test_Query:
+		q := convertQuery(t, tt.Query)
+		got, err := q.toProto()
+		if check(err, tt.Query.IsError) && err == nil {
+			if want := tt.Query.Query; !proto.Equal(got, want) {
+				t.Errorf("%s\ngot: %s\nwant: %s", msg, proto.MarshalTextString(got), proto.MarshalTextString(want))
+			}
+		}
+
 	default:
 		t.Fatalf("unknown test type %T", tt)
 	}
@@ -159,6 +180,14 @@ func docRefFromPath(p string, c *Client) *DocumentRef {
 	}
 }
 
+func convertJSONValue(jv string) (interface{}, error) {
+	var val interface{}
+	if err := json.Unmarshal([]byte(jv), &val); err != nil {
+		return nil, err
+	}
+	return convertTestValue(val), nil
+}
+
 func convertData(jsonData string) (map[string]interface{}, error) {
 	var m map[string]interface{}
 	if err := json.Unmarshal([]byte(jsonData), &m); err != nil {
@@ -236,3 +265,90 @@ func convertPrecondition(t *testing.T, fp *fspb.Precondition) []Precondition {
 	}
 	return []Precondition{pc}
 }
+
+func convertQuery(t *testing.T, qt *pb.QueryTest) Query {
+	parts := strings.Split(qt.CollPath, "/")
+	q := Query{
+		parentPath:   strings.Join(parts[:len(parts)-2], "/"),
+		collectionID: parts[len(parts)-1],
+	}
+	for _, c := range qt.Clauses {
+		switch c := c.Clause.(type) {
+		case *pb.Clause_Select:
+			q = q.SelectPaths(convertFieldPaths(c.Select.Fields)...)
+		case *pb.Clause_OrderBy:
+			var dir Direction
+			switch c.OrderBy.Direction {
+			case "asc":
+				dir = Asc
+			case "desc":
+				dir = Desc
+			default:
+				t.Fatalf("bad direction: %q", c.OrderBy.Direction)
+			}
+			q = q.OrderByPath(FieldPath(c.OrderBy.Path.Field), dir)
+		case *pb.Clause_Where:
+			val, err := convertJSONValue(c.Where.JsonValue)
+			if err != nil {
+				t.Fatal(err)
+			}
+			q = q.WherePath(FieldPath(c.Where.Path.Field), c.Where.Op, val)
+		case *pb.Clause_Offset:
+			q = q.Offset(int(c.Offset))
+		case *pb.Clause_Limit:
+			q = q.Limit(int(c.Limit))
+		case *pb.Clause_StartAt:
+			q = q.StartAt(convertCursor(t, c.StartAt)...)
+		case *pb.Clause_StartAfter:
+			q = q.StartAfter(convertCursor(t, c.StartAfter)...)
+		case *pb.Clause_EndAt:
+			q = q.EndAt(convertCursor(t, c.EndAt)...)
+		case *pb.Clause_EndBefore:
+			q = q.EndBefore(convertCursor(t, c.EndBefore)...)
+		default:
+			t.Fatalf("bad clause type %T", c)
+		}
+	}
+	return q
+}
+
+// Returns args to a cursor method (StartAt, etc.).
+func convertCursor(t *testing.T, c *pb.Cursor) []interface{} {
+	if c.DocSnapshot != nil {
+		ds, err := convertDocSnapshot(c.DocSnapshot)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return []interface{}{ds}
+	}
+	var vals []interface{}
+	for _, jv := range c.JsonValues {
+		v, err := convertJSONValue(jv)
+		if err != nil {
+			t.Fatal(err)
+		}
+		vals = append(vals, v)
+	}
+	return vals
+}
+
+func convertDocSnapshot(ds *pb.DocSnapshot) (*DocumentSnapshot, error) {
+	data, err := convertData(ds.JsonData)
+	if err != nil {
+		return nil, err
+	}
+	doc, transformPaths, err := toProtoDocument(data)
+	if err != nil {
+		return nil, err
+	}
+	if len(transformPaths) > 0 {
+		return nil, errors.New("saw transform paths in DocSnapshot")
+	}
+	return &DocumentSnapshot{
+		Ref: &DocumentRef{
+			Path:   ds.Path,
+			Parent: &CollectionRef{Path: path.Dir(ds.Path)},
+		},
+		proto: doc,
+	}, nil
+}
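Aside (not from the diff): the clause conversions above drive the package's public query builders. A small sketch of the equivalent public-API calls; collection and field names are placeholders, and client/ctx are assumed from earlier setup. iterator is google.golang.org/api/iterator:

q := client.Collection("cities").
	Where("population", ">", 100000).
	OrderBy("population", firestore.Desc).
	Limit(5)
iter := q.Documents(ctx)
defer iter.Stop()
for {
	snap, err := iter.Next()
	if err == iterator.Done {
		break
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(snap.Data())
}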
3 vendor/cloud.google.com/go/firestore/doc.go generated vendored

@@ -21,6 +21,9 @@ database.
 See https://cloud.google.com/firestore/docs for an introduction
 to Cloud Firestore and additional help on using the Firestore API.
 
+Note: you can't use both Cloud Firestore and Cloud Datastore in the same
+project.
+
 Creating a Client
 
 To start working with this package, create a client with a project ID:
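For context, a sketch of the client setup that the package comment goes on to describe; the project ID is a placeholder and error handling is elided:

ctx := context.Background()
client, err := firestore.NewClient(ctx, "projectID") // placeholder project ID
if err != nil {
	// TODO: handle error.
}
defer client.Close()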
10 vendor/cloud.google.com/go/firestore/docref.go generated vendored

@@ -22,6 +22,8 @@ import (
 
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 
 	vkit "cloud.google.com/go/firestore/apiv1beta1"
 	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
@@ -64,12 +66,14 @@ func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) {
 	if d == nil {
 		return nil, errNilDocRef
 	}
-	doc, err := d.Parent.c.c.GetDocument(withResourceHeader(ctx, d.Parent.c.path()),
-		&pb.GetDocumentRequest{Name: d.Path})
+	docsnaps, err := d.Parent.c.getAll(ctx, []*DocumentRef{d}, nil)
 	if err != nil {
 		return nil, err
 	}
-	return newDocumentSnapshot(d, doc, d.Parent.c)
+	if docsnaps[0] == nil {
+		return nil, status.Errorf(codes.NotFound, "%q not found", d.Path)
+	}
+	return docsnaps[0], nil
 }
 
 // Create creates the document with the given data.
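Aside (not part of the commit): after this change, a missing document surfaces from Get as a client-constructed gRPC NotFound status, so callers can branch on the status code. Sketch, assuming ref is a *firestore.DocumentRef and ctx a context.Context:

snap, err := ref.Get(ctx)
if status.Code(err) == codes.NotFound {
	// The document does not exist.
} else if err != nil {
	// Some other RPC failure.
} else {
	fmt.Println(snap.Data())
}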
26 vendor/cloud.google.com/go/firestore/docref_test.go generated vendored

@@ -45,7 +45,15 @@ func TestDocGet(t *testing.T) {
 		UpdateTime: aTimestamp,
 		Fields:     map[string]*pb.Value{"f": intval(1)},
 	}
-	srv.addRPC(&pb.GetDocumentRequest{Name: path}, pdoc)
+	srv.addRPC(&pb.BatchGetDocumentsRequest{
+		Database:  c.path(),
+		Documents: []string{path},
+	}, []interface{}{
+		&pb.BatchGetDocumentsResponse{
+			Result:   &pb.BatchGetDocumentsResponse_Found{pdoc},
+			ReadTime: aTimestamp2,
+		},
+	})
 	ref := c.Collection("C").Doc("a")
 	gotDoc, err := ref.Get(ctx)
 	if err != nil {
@@ -55,6 +63,7 @@ func TestDocGet(t *testing.T) {
 		Ref:        ref,
 		CreateTime: aTime,
 		UpdateTime: aTime,
+		ReadTime:   aTime2,
 		proto:      pdoc,
 		c:          c,
 	}
@@ -62,12 +71,17 @@ func TestDocGet(t *testing.T) {
 		t.Fatalf("\ngot %+v\nwant %+v", gotDoc, wantDoc)
 	}
 
+	path2 := "projects/projectID/databases/(default)/documents/C/b"
 	srv.addRPC(
-		&pb.GetDocumentRequest{
-			Name: "projects/projectID/databases/(default)/documents/C/b",
-		},
-		grpc.Errorf(codes.NotFound, "not found"),
-	)
+		&pb.BatchGetDocumentsRequest{
+			Database:  c.path(),
+			Documents: []string{path2},
+		}, []interface{}{
+			&pb.BatchGetDocumentsResponse{
+				Result:   &pb.BatchGetDocumentsResponse_Missing{path2},
+				ReadTime: aTimestamp3,
+			},
+		})
 	_, err = c.Collection("C").Doc("b").Get(ctx)
 	if grpc.Code(err) != codes.NotFound {
 		t.Errorf("got %v, want NotFound", err)
14 vendor/cloud.google.com/go/firestore/document.go generated vendored

@@ -23,6 +23,7 @@ import (
 	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
 
 	"github.com/golang/protobuf/ptypes"
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
 )
 
 // A DocumentSnapshot contains document data and metadata.
@@ -42,6 +43,9 @@ type DocumentSnapshot struct {
 	// documents and the read time of a query.
 	UpdateTime time.Time
 
+	// Read-only. The time at which the document was read.
+	ReadTime time.Time
+
 	c     *Client
 	proto *pb.Document
 }
@@ -241,7 +245,7 @@ func extractTransformPathsFromStruct(v reflect.Value, prefix FieldPath) ([]Field
 	return paths, nil
 }
 
-func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client) (*DocumentSnapshot, error) {
+func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client, readTime *tspb.Timestamp) (*DocumentSnapshot, error) {
 	d := &DocumentSnapshot{
 		Ref: ref,
 		c:   c,
@@ -257,5 +261,13 @@ func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client) (*Docu
 		return nil, err
 	}
 	d.UpdateTime = ts
+	// TODO(jba): remove nil check when all callers pass a read time.
+	if readTime != nil {
+		ts, err = ptypes.Timestamp(readTime)
+		if err != nil {
+			return nil, err
+		}
+		d.ReadTime = ts
+	}
 	return d, nil
}
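Aside: the new ReadTime field is converted from the BatchGetDocuments response timestamp, so a snapshot now records when it was read. Sketch, with ref and ctx assumed as in the earlier examples:

snap, err := ref.Get(ctx)
if err != nil {
	// TODO: handle error.
}
fmt.Println("created:", snap.CreateTime, "read:", snap.ReadTime)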
3 vendor/cloud.google.com/go/firestore/document_test.go generated vendored

@@ -69,10 +69,11 @@ func TestNewDocumentSnapshot(t *testing.T) {
 		Ref:        docRef,
 		CreateTime: time.Unix(10, 0).UTC(),
 		UpdateTime: time.Unix(20, 0).UTC(),
+		ReadTime:   aTime,
 		proto:      in,
 		c:          c,
 	}
-	got, err := newDocumentSnapshot(docRef, in, c)
+	got, err := newDocumentSnapshot(docRef, in, c, aTimestamp)
 	if err != nil {
 		t.Fatal(err)
 	}
8 vendor/cloud.google.com/go/firestore/from_value.go generated vendored

@@ -73,6 +73,14 @@ func setReflectFromProtoValue(v reflect.Value, vproto *pb.Value, c *Client) erro
 		v.Set(reflect.ValueOf(t))
 		return nil
 
+	case typeOfProtoTimestamp:
+		x, ok := val.(*pb.Value_TimestampValue)
+		if !ok {
+			return typeErr()
+		}
+		v.Set(reflect.ValueOf(x.TimestampValue))
+		return nil
+
 	case typeOfLatLng:
 		x, ok := val.(*pb.Value_GeoPointValue)
 		if !ok {
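Aside: the new typeOfProtoTimestamp case lets a struct field declared as a raw protobuf timestamp receive the value directly, without conversion to time.Time. Sketch with an illustrative struct; snap is assumed to be a *firestore.DocumentSnapshot and tspb is github.com/golang/protobuf/ptypes/timestamp:

type record struct {
	Created *tspb.Timestamp `firestore:"created"` // field name is illustrative
}
var r record
if err := snap.DataTo(&r); err != nil {
	// TODO: handle error.
}
fmt.Println(r.Created) // still a *tspb.Timestamp, not a time.Time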
8 vendor/cloud.google.com/go/firestore/from_value_test.go generated vendored

@@ -24,14 +24,16 @@ import (
 	"testing"
 	"time"
 
+	ts "github.com/golang/protobuf/ptypes/timestamp"
 	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
 
 	"google.golang.org/genproto/googleapis/type/latlng"
 )
 
 var (
 	tm = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC)
 	ll = &latlng.LatLng{Latitude: 20, Longitude: 30}
+	ptm = &ts.Timestamp{12345, 67890}
 )
 
 func TestCreateFromProtoValue(t *testing.T) {
@@ -187,6 +189,7 @@ func TestSetFromProtoValueNoJSON(t *testing.T) {
 		bs  []byte
 		tmi time.Time
 		lli *latlng.LatLng
+		tmp *ts.Timestamp
 	)
 	bytes := []byte{1, 2, 3}
 
@@ -197,6 +200,7 @@ func TestSetFromProtoValueNoJSON(t *testing.T) {
 	}{
 		{&bs, bytesval(bytes), bytes},
 		{&tmi, tsval(tm), tm},
+		{&tmp, &pb.Value{&pb.Value_TimestampValue{ptm}}, ptm},
 		{&lli, geoval(ll), ll},
 	} {
 		if err := setFromProtoValue(test.in, test.val, &Client{}); err != nil {
3 vendor/cloud.google.com/go/firestore/genproto/README.md generated vendored

@@ -1,3 +0,0 @@
-The contents of this directory are copied from
-github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/genproto.
673 vendor/cloud.google.com/go/firestore/genproto/test.pb.go generated vendored
@@ -9,6 +9,7 @@ It is generated from these files:
 	test.proto
 
 It has these top-level messages:
+	TestSuite
 	Test
 	GetTest
 	CreateTest
@@ -17,6 +18,13 @@ It has these top-level messages:
 	UpdatePathsTest
 	DeleteTest
 	SetOption
+	QueryTest
+	Clause
+	Select
+	Where
+	OrderBy
+	Cursor
+	DocSnapshot
 	FieldPath
 */
 package tests
@@ -26,6 +34,7 @@ import fmt "fmt"
 import math "math"
 
 import google_firestore_v1beta14 "google.golang.org/genproto/googleapis/firestore/v1beta1"
 import google_firestore_v1beta1 "google.golang.org/genproto/googleapis/firestore/v1beta1"
+import google_firestore_v1beta12 "google.golang.org/genproto/googleapis/firestore/v1beta1"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -38,6 +47,23 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
+// A collection of tests.
+type TestSuite struct {
+	Tests []*Test `protobuf:"bytes,1,rep,name=tests" json:"tests,omitempty"`
+}
+
+func (m *TestSuite) Reset()                    { *m = TestSuite{} }
+func (m *TestSuite) String() string            { return proto.CompactTextString(m) }
+func (*TestSuite) ProtoMessage()               {}
+func (*TestSuite) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *TestSuite) GetTests() []*Test {
+	if m != nil {
+		return m.Tests
+	}
+	return nil
+}
+
 // A Test describes a single client method call and its expected result.
 type Test struct {
 	Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
@@ -48,13 +74,14 @@ type Test struct {
 	// *Test_Update
 	// *Test_UpdatePaths
 	// *Test_Delete
+	// *Test_Query
 	Test isTest_Test `protobuf_oneof:"test"`
 }
 
 func (m *Test) Reset()                    { *m = Test{} }
 func (m *Test) String() string            { return proto.CompactTextString(m) }
 func (*Test) ProtoMessage()               {}
-func (*Test) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Test) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
 
 type isTest_Test interface {
 	isTest_Test()
@@ -78,6 +105,9 @@ type Test_UpdatePaths struct {
 type Test_Delete struct {
 	Delete *DeleteTest `protobuf:"bytes,7,opt,name=delete,oneof"`
 }
+type Test_Query struct {
+	Query *QueryTest `protobuf:"bytes,8,opt,name=query,oneof"`
+}
 
 func (*Test_Get) isTest_Test()    {}
 func (*Test_Create) isTest_Test() {}
@@ -85,6 +115,7 @@ func (*Test_Set) isTest_Test() {}
 func (*Test_Update) isTest_Test()      {}
 func (*Test_UpdatePaths) isTest_Test() {}
 func (*Test_Delete) isTest_Test()      {}
+func (*Test_Query) isTest_Test()       {}
 
 func (m *Test) GetTest() isTest_Test {
 	if m != nil {
@@ -142,6 +173,13 @@ func (m *Test) GetDelete() *DeleteTest {
 	return nil
 }
 
+func (m *Test) GetQuery() *QueryTest {
+	if x, ok := m.GetTest().(*Test_Query); ok {
+		return x.Query
+	}
+	return nil
+}
+
 // XXX_OneofFuncs is for the internal use of the proto package.
 func (*Test) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
 	return _Test_OneofMarshaler, _Test_OneofUnmarshaler, _Test_OneofSizer, []interface{}{
@@ -151,6 +189,7 @@ func (*Test) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, f
 		(*Test_Update)(nil),
 		(*Test_UpdatePaths)(nil),
 		(*Test_Delete)(nil),
+		(*Test_Query)(nil),
 	}
 }
 
@@ -188,6 +227,11 @@ func _Test_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
 		if err := b.EncodeMessage(x.Delete); err != nil {
 			return err
 		}
+	case *Test_Query:
+		b.EncodeVarint(8<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Query); err != nil {
+			return err
+		}
 	case nil:
 	default:
 		return fmt.Errorf("Test.Test has unexpected type %T", x)
@@ -246,6 +290,14 @@ func _Test_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (
 		err := b.DecodeMessage(msg)
 		m.Test = &Test_Delete{msg}
 		return true, err
+	case 8: // test.query
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(QueryTest)
+		err := b.DecodeMessage(msg)
+		m.Test = &Test_Query{msg}
+		return true, err
 	default:
 		return false, nil
 	}
@@ -285,6 +337,11 @@ func _Test_OneofSizer(msg proto.Message) (n int) {
 		n += proto.SizeVarint(7<<3 | proto.WireBytes)
 		n += proto.SizeVarint(uint64(s))
 		n += s
+	case *Test_Query:
+		s := proto.Size(x.Query)
+		n += proto.SizeVarint(8<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
 	case nil:
 	default:
 		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
@@ -303,7 +360,7 @@ type GetTest struct {
 func (m *GetTest) Reset()                    { *m = GetTest{} }
 func (m *GetTest) String() string            { return proto.CompactTextString(m) }
 func (*GetTest) ProtoMessage()               {}
-func (*GetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*GetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
 
 func (m *GetTest) GetDocRefPath() string {
 	if m != nil {
@@ -337,7 +394,7 @@ type CreateTest struct {
 func (m *CreateTest) Reset()                    { *m = CreateTest{} }
 func (m *CreateTest) String() string            { return proto.CompactTextString(m) }
 func (*CreateTest) ProtoMessage()               {}
-func (*CreateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*CreateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
 
 func (m *CreateTest) GetDocRefPath() string {
 	if m != nil {
@@ -379,7 +436,7 @@ type SetTest struct {
 func (m *SetTest) Reset()                    { *m = SetTest{} }
 func (m *SetTest) String() string            { return proto.CompactTextString(m) }
 func (*SetTest) ProtoMessage()               {}
-func (*SetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*SetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
 
 func (m *SetTest) GetDocRefPath() string {
 	if m != nil {
@@ -429,7 +486,7 @@ type UpdateTest struct {
 func (m *UpdateTest) Reset()                    { *m = UpdateTest{} }
 func (m *UpdateTest) String() string            { return proto.CompactTextString(m) }
 func (*UpdateTest) ProtoMessage()               {}
-func (*UpdateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*UpdateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
 
 func (m *UpdateTest) GetDocRefPath() string {
 	if m != nil {
@@ -481,7 +538,7 @@ type UpdatePathsTest struct {
 func (m *UpdatePathsTest) Reset()                    { *m = UpdatePathsTest{} }
 func (m *UpdatePathsTest) String() string            { return proto.CompactTextString(m) }
 func (*UpdatePathsTest) ProtoMessage()               {}
-func (*UpdatePathsTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*UpdatePathsTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
 
 func (m *UpdatePathsTest) GetDocRefPath() string {
 	if m != nil {
@@ -536,7 +593,7 @@ type DeleteTest struct {
 func (m *DeleteTest) Reset()                    { *m = DeleteTest{} }
 func (m *DeleteTest) String() string            { return proto.CompactTextString(m) }
 func (*DeleteTest) ProtoMessage()               {}
-func (*DeleteTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*DeleteTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
 
 func (m *DeleteTest) GetDocRefPath() string {
 	if m != nil {
@@ -575,7 +632,7 @@ type SetOption struct {
 func (m *SetOption) Reset()                    { *m = SetOption{} }
 func (m *SetOption) String() string            { return proto.CompactTextString(m) }
 func (*SetOption) ProtoMessage()               {}
-func (*SetOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*SetOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
 
 func (m *SetOption) GetAll() bool {
 	if m != nil {
@@ -591,7 +648,495 @@ func (m *SetOption) GetFields() []*FieldPath {
 	return nil
 }
 
-// A field path.
+type QueryTest struct {
+	CollPath string                                     `protobuf:"bytes,1,opt,name=coll_path,json=collPath" json:"coll_path,omitempty"`
+	Clauses  []*Clause                                  `protobuf:"bytes,2,rep,name=clauses" json:"clauses,omitempty"`
+	Query    *google_firestore_v1beta12.StructuredQuery `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"`
+	IsError  bool                                       `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"`
+}
+
+func (m *QueryTest) Reset()                    { *m = QueryTest{} }
+func (m *QueryTest) String() string            { return proto.CompactTextString(m) }
+func (*QueryTest) ProtoMessage()               {}
+func (*QueryTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *QueryTest) GetCollPath() string {
+	if m != nil {
+		return m.CollPath
+	}
+	return ""
+}
+
+func (m *QueryTest) GetClauses() []*Clause {
+	if m != nil {
+		return m.Clauses
+	}
+	return nil
+}
+
+func (m *QueryTest) GetQuery() *google_firestore_v1beta12.StructuredQuery {
+	if m != nil {
+		return m.Query
+	}
+	return nil
+}
+
+func (m *QueryTest) GetIsError() bool {
+	if m != nil {
+		return m.IsError
+	}
+	return false
+}
+
+type Clause struct {
+	// Types that are valid to be assigned to Clause:
+	// *Clause_Select
+	// *Clause_Where
+	// *Clause_OrderBy
+	// *Clause_Offset
+	// *Clause_Limit
+	// *Clause_StartAt
+	// *Clause_StartAfter
+	// *Clause_EndAt
+	// *Clause_EndBefore
+	Clause isClause_Clause `protobuf_oneof:"clause"`
+}
+
+func (m *Clause) Reset()                    { *m = Clause{} }
+func (m *Clause) String() string            { return proto.CompactTextString(m) }
+func (*Clause) ProtoMessage()               {}
+func (*Clause) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+type isClause_Clause interface {
+	isClause_Clause()
+}
+
+type Clause_Select struct {
+	Select *Select `protobuf:"bytes,1,opt,name=select,oneof"`
+}
+type Clause_Where struct {
+	Where *Where `protobuf:"bytes,2,opt,name=where,oneof"`
+}
+type Clause_OrderBy struct {
+	OrderBy *OrderBy `protobuf:"bytes,3,opt,name=order_by,json=orderBy,oneof"`
+}
+type Clause_Offset struct {
+	Offset int32 `protobuf:"varint,4,opt,name=offset,oneof"`
+}
+type Clause_Limit struct {
+	Limit int32 `protobuf:"varint,5,opt,name=limit,oneof"`
+}
+type Clause_StartAt struct {
+	StartAt *Cursor `protobuf:"bytes,6,opt,name=start_at,json=startAt,oneof"`
+}
+type Clause_StartAfter struct {
+	StartAfter *Cursor `protobuf:"bytes,7,opt,name=start_after,json=startAfter,oneof"`
+}
+type Clause_EndAt struct {
+	EndAt *Cursor `protobuf:"bytes,8,opt,name=end_at,json=endAt,oneof"`
+}
+type Clause_EndBefore struct {
+	EndBefore *Cursor `protobuf:"bytes,9,opt,name=end_before,json=endBefore,oneof"`
+}
+
+func (*Clause_Select) isClause_Clause()     {}
+func (*Clause_Where) isClause_Clause()      {}
+func (*Clause_OrderBy) isClause_Clause()    {}
+func (*Clause_Offset) isClause_Clause()     {}
+func (*Clause_Limit) isClause_Clause()      {}
+func (*Clause_StartAt) isClause_Clause()    {}
+func (*Clause_StartAfter) isClause_Clause() {}
+func (*Clause_EndAt) isClause_Clause()      {}
+func (*Clause_EndBefore) isClause_Clause()  {}
+
+func (m *Clause) GetClause() isClause_Clause {
+	if m != nil {
+		return m.Clause
+	}
+	return nil
+}
+
+func (m *Clause) GetSelect() *Select {
+	if x, ok := m.GetClause().(*Clause_Select); ok {
+		return x.Select
+	}
+	return nil
+}
+
+func (m *Clause) GetWhere() *Where {
+	if x, ok := m.GetClause().(*Clause_Where); ok {
+		return x.Where
+	}
+	return nil
+}
+
+func (m *Clause) GetOrderBy() *OrderBy {
+	if x, ok := m.GetClause().(*Clause_OrderBy); ok {
+		return x.OrderBy
+	}
+	return nil
+}
+
+func (m *Clause) GetOffset() int32 {
+	if x, ok := m.GetClause().(*Clause_Offset); ok {
+		return x.Offset
+	}
+	return 0
+}
+
+func (m *Clause) GetLimit() int32 {
+	if x, ok := m.GetClause().(*Clause_Limit); ok {
+		return x.Limit
+	}
+	return 0
+}
+
+func (m *Clause) GetStartAt() *Cursor {
+	if x, ok := m.GetClause().(*Clause_StartAt); ok {
+		return x.StartAt
+	}
+	return nil
+}
+
+func (m *Clause) GetStartAfter() *Cursor {
+	if x, ok := m.GetClause().(*Clause_StartAfter); ok {
+		return x.StartAfter
+	}
+	return nil
+}
+
+func (m *Clause) GetEndAt() *Cursor {
+	if x, ok := m.GetClause().(*Clause_EndAt); ok {
+		return x.EndAt
+	}
+	return nil
+}
+
+func (m *Clause) GetEndBefore() *Cursor {
+	if x, ok := m.GetClause().(*Clause_EndBefore); ok {
+		return x.EndBefore
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Clause) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _Clause_OneofMarshaler, _Clause_OneofUnmarshaler, _Clause_OneofSizer, []interface{}{
+		(*Clause_Select)(nil),
+		(*Clause_Where)(nil),
+		(*Clause_OrderBy)(nil),
+		(*Clause_Offset)(nil),
+		(*Clause_Limit)(nil),
+		(*Clause_StartAt)(nil),
+		(*Clause_StartAfter)(nil),
+		(*Clause_EndAt)(nil),
+		(*Clause_EndBefore)(nil),
+	}
+}
+
+func _Clause_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*Clause)
+	// clause
+	switch x := m.Clause.(type) {
+	case *Clause_Select:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Select); err != nil {
+			return err
+		}
+	case *Clause_Where:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Where); err != nil {
+			return err
+		}
+	case *Clause_OrderBy:
+		b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.OrderBy); err != nil {
+			return err
+		}
+	case *Clause_Offset:
+		b.EncodeVarint(4<<3 | proto.WireVarint)
+		b.EncodeVarint(uint64(x.Offset))
+	case *Clause_Limit:
+		b.EncodeVarint(5<<3 | proto.WireVarint)
+		b.EncodeVarint(uint64(x.Limit))
+	case *Clause_StartAt:
+		b.EncodeVarint(6<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.StartAt); err != nil {
+			return err
+		}
+	case *Clause_StartAfter:
+		b.EncodeVarint(7<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.StartAfter); err != nil {
+			return err
+		}
+	case *Clause_EndAt:
+		b.EncodeVarint(8<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.EndAt); err != nil {
+			return err
+		}
+	case *Clause_EndBefore:
+		b.EncodeVarint(9<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.EndBefore); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("Clause.Clause has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _Clause_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*Clause)
+	switch tag {
+	case 1: // clause.select
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Select)
+		err := b.DecodeMessage(msg)
+		m.Clause = &Clause_Select{msg}
+		return true, err
+	case 2: // clause.where
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Where)
+		err := b.DecodeMessage(msg)
+		m.Clause = &Clause_Where{msg}
+		return true, err
+	case 3: // clause.order_by
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(OrderBy)
+		err := b.DecodeMessage(msg)
+		m.Clause = &Clause_OrderBy{msg}
+		return true, err
+	case 4: // clause.offset
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Clause = &Clause_Offset{int32(x)}
+		return true, err
+	case 5: // clause.limit
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Clause = &Clause_Limit{int32(x)}
+		return true, err
+	case 6: // clause.start_at
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Cursor)
+		err := b.DecodeMessage(msg)
+		m.Clause = &Clause_StartAt{msg}
+		return true, err
+	case 7: // clause.start_after
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Cursor)
+		err := b.DecodeMessage(msg)
+		m.Clause = &Clause_StartAfter{msg}
+		return true, err
+	case 8: // clause.end_at
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Cursor)
+		err := b.DecodeMessage(msg)
+		m.Clause = &Clause_EndAt{msg}
+		return true, err
+	case 9: // clause.end_before
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Cursor)
+		err := b.DecodeMessage(msg)
+		m.Clause = &Clause_EndBefore{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _Clause_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*Clause)
+	// clause
+	switch x := m.Clause.(type) {
+	case *Clause_Select:
+		s := proto.Size(x.Select)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Clause_Where:
+		s := proto.Size(x.Where)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Clause_OrderBy:
+		s := proto.Size(x.OrderBy)
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Clause_Offset:
+		n += proto.SizeVarint(4<<3 | proto.WireVarint)
+		n += proto.SizeVarint(uint64(x.Offset))
+	case *Clause_Limit:
+		n += proto.SizeVarint(5<<3 | proto.WireVarint)
+		n += proto.SizeVarint(uint64(x.Limit))
+	case *Clause_StartAt:
+		s := proto.Size(x.StartAt)
+		n += proto.SizeVarint(6<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Clause_StartAfter:
+		s := proto.Size(x.StartAfter)
+		n += proto.SizeVarint(7<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Clause_EndAt:
+		s := proto.Size(x.EndAt)
+		n += proto.SizeVarint(8<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Clause_EndBefore:
+		s := proto.Size(x.EndBefore)
+		n += proto.SizeVarint(9<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type Select struct {
+	Fields []*FieldPath `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"`
+}
+
+func (m *Select) Reset()                    { *m = Select{} }
+func (m *Select) String() string            { return proto.CompactTextString(m) }
+func (*Select) ProtoMessage()               {}
+func (*Select) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *Select) GetFields() []*FieldPath {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+type Where struct {
+	Path      *FieldPath `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"`
+	Op        string     `protobuf:"bytes,2,opt,name=op" json:"op,omitempty"`
+	JsonValue string     `protobuf:"bytes,3,opt,name=json_value,json=jsonValue" json:"json_value,omitempty"`
+}
+
+func (m *Where) Reset()                    { *m = Where{} }
+func (m *Where) String() string            { return proto.CompactTextString(m) }
+func (*Where) ProtoMessage()               {}
+func (*Where) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *Where) GetPath() *FieldPath {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *Where) GetOp() string {
+	if m != nil {
+		return m.Op
+	}
+	return ""
+}
+
+func (m *Where) GetJsonValue() string {
+	if m != nil {
+		return m.JsonValue
+	}
+	return ""
+}
+
+type OrderBy struct {
+	Path      *FieldPath `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"`
+	Direction string     `protobuf:"bytes,2,opt,name=direction" json:"direction,omitempty"`
+}
+
+func (m *OrderBy) Reset()                    { *m = OrderBy{} }
+func (m *OrderBy) String() string            { return proto.CompactTextString(m) }
+func (*OrderBy) ProtoMessage()               {}
+func (*OrderBy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *OrderBy) GetPath() *FieldPath {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *OrderBy) GetDirection() string {
+	if m != nil {
+		return m.Direction
+	}
+	return ""
+}
+
+type Cursor struct {
+	// one of:
+	DocSnapshot *DocSnapshot `protobuf:"bytes,1,opt,name=doc_snapshot,json=docSnapshot" json:"doc_snapshot,omitempty"`
+	JsonValues  []string     `protobuf:"bytes,2,rep,name=json_values,json=jsonValues" json:"json_values,omitempty"`
+}
+
+func (m *Cursor) Reset()                    { *m = Cursor{} }
+func (m *Cursor) String() string            { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage()               {}
+func (*Cursor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *Cursor) GetDocSnapshot() *DocSnapshot {
+	if m != nil {
+		return m.DocSnapshot
+	}
+	return nil
+}
+
+func (m *Cursor) GetJsonValues() []string {
+	if m != nil {
+		return m.JsonValues
+	}
+	return nil
+}
+
+type DocSnapshot struct {
+	Path     string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"`
+	JsonData string `protobuf:"bytes,2,opt,name=json_data,json=jsonData" json:"json_data,omitempty"`
+}
+
+func (m *DocSnapshot) Reset()                    { *m = DocSnapshot{} }
+func (m *DocSnapshot) String() string            { return proto.CompactTextString(m) }
+func (*DocSnapshot) ProtoMessage()               {}
+func (*DocSnapshot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+func (m *DocSnapshot) GetPath() string {
+	if m != nil {
+		return m.Path
+	}
+	return ""
+}
+
+func (m *DocSnapshot) GetJsonData() string {
+	if m != nil {
+		return m.JsonData
+	}
+	return ""
+}
+
type FieldPath struct {
|
type FieldPath struct {
|
||||||
Field []string `protobuf:"bytes,1,rep,name=field" json:"field,omitempty"`
|
Field []string `protobuf:"bytes,1,rep,name=field" json:"field,omitempty"`
|
||||||
}
|
}
|
||||||
|
@ -599,7 +1144,7 @@ type FieldPath struct {
|
||||||
func (m *FieldPath) Reset() { *m = FieldPath{} }
|
func (m *FieldPath) Reset() { *m = FieldPath{} }
|
||||||
func (m *FieldPath) String() string { return proto.CompactTextString(m) }
|
func (m *FieldPath) String() string { return proto.CompactTextString(m) }
|
||||||
func (*FieldPath) ProtoMessage() {}
|
func (*FieldPath) ProtoMessage() {}
|
||||||
func (*FieldPath) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
func (*FieldPath) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
|
||||||
|
|
||||||
func (m *FieldPath) GetField() []string {
|
func (m *FieldPath) GetField() []string {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
|
@ -609,6 +1154,7 @@ func (m *FieldPath) GetField() []string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
proto.RegisterType((*TestSuite)(nil), "tests.TestSuite")
|
||||||
proto.RegisterType((*Test)(nil), "tests.Test")
|
proto.RegisterType((*Test)(nil), "tests.Test")
|
||||||
proto.RegisterType((*GetTest)(nil), "tests.GetTest")
|
proto.RegisterType((*GetTest)(nil), "tests.GetTest")
|
||||||
proto.RegisterType((*CreateTest)(nil), "tests.CreateTest")
|
proto.RegisterType((*CreateTest)(nil), "tests.CreateTest")
|
||||||
|
@ -617,46 +1163,81 @@ func init() {
|
||||||
proto.RegisterType((*UpdatePathsTest)(nil), "tests.UpdatePathsTest")
|
proto.RegisterType((*UpdatePathsTest)(nil), "tests.UpdatePathsTest")
|
||||||
proto.RegisterType((*DeleteTest)(nil), "tests.DeleteTest")
|
proto.RegisterType((*DeleteTest)(nil), "tests.DeleteTest")
|
||||||
proto.RegisterType((*SetOption)(nil), "tests.SetOption")
|
proto.RegisterType((*SetOption)(nil), "tests.SetOption")
|
||||||
|
proto.RegisterType((*QueryTest)(nil), "tests.QueryTest")
|
||||||
|
proto.RegisterType((*Clause)(nil), "tests.Clause")
|
||||||
|
proto.RegisterType((*Select)(nil), "tests.Select")
|
||||||
|
proto.RegisterType((*Where)(nil), "tests.Where")
|
||||||
|
proto.RegisterType((*OrderBy)(nil), "tests.OrderBy")
|
||||||
|
proto.RegisterType((*Cursor)(nil), "tests.Cursor")
|
||||||
|
proto.RegisterType((*DocSnapshot)(nil), "tests.DocSnapshot")
|
||||||
proto.RegisterType((*FieldPath)(nil), "tests.FieldPath")
|
proto.RegisterType((*FieldPath)(nil), "tests.FieldPath")
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
|
func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
|
||||||
|
|
||||||
var fileDescriptor0 = []byte{
|
var fileDescriptor0 = []byte{
|
||||||
// 559 bytes of a gzipped FileDescriptorProto
|
// 994 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x56, 0x5f, 0x6f, 0xdc, 0x44,
|
||||||
0x10, 0xc5, 0x71, 0xe2, 0x24, 0x93, 0x08, 0xca, 0x0a, 0x21, 0x53, 0x0e, 0x18, 0x4b, 0x40, 0x24,
|
0x10, 0xaf, 0x7d, 0x67, 0x9f, 0x3d, 0x0e, 0xa5, 0xac, 0x50, 0x65, 0x0a, 0x88, 0xab, 0x15, 0x92,
|
||||||
0x50, 0xaa, 0xc0, 0x91, 0x13, 0x34, 0xb4, 0x88, 0x0b, 0xd5, 0x16, 0xb8, 0x46, 0xae, 0x3d, 0x09,
|
0x83, 0xa2, 0x2b, 0x09, 0xe2, 0x09, 0x09, 0x94, 0x4b, 0x48, 0x2a, 0xa4, 0xaa, 0xc1, 0x57, 0xe0,
|
||||||
0x46, 0x8e, 0xd7, 0xec, 0xae, 0xfb, 0x9f, 0x38, 0x72, 0xe7, 0x47, 0x70, 0xe4, 0x8f, 0x70, 0x47,
|
0x05, 0xe9, 0x70, 0xec, 0x71, 0x62, 0xe4, 0xf3, 0x5e, 0x77, 0xd7, 0x45, 0xfd, 0x48, 0x20, 0xf1,
|
||||||
0xfb, 0xe1, 0xda, 0x06, 0x59, 0xca, 0xa1, 0xb4, 0xb7, 0xf5, 0x9b, 0x37, 0x1f, 0xef, 0xcd, 0x6e,
|
0xc0, 0x37, 0xe1, 0x91, 0x4f, 0xc0, 0x37, 0x80, 0x67, 0xb4, 0x7f, 0x7c, 0xb6, 0xd3, 0x5c, 0xc9,
|
||||||
0x02, 0x20, 0x51, 0xc8, 0x79, 0xc1, 0x99, 0x64, 0x64, 0xa0, 0xce, 0x62, 0x7f, 0xb6, 0x61, 0x6c,
|
0x43, 0x29, 0x6f, 0xbb, 0x33, 0xbf, 0x99, 0x9d, 0xf9, 0xcd, 0xec, 0xec, 0x02, 0x08, 0xe4, 0x62,
|
||||||
0x93, 0xe1, 0xc1, 0x3a, 0xe5, 0x28, 0x24, 0xe3, 0x78, 0x70, 0xbe, 0x38, 0x43, 0x19, 0x2d, 0x6a,
|
0xba, 0x62, 0x54, 0x50, 0xe2, 0xc8, 0x35, 0xbf, 0x33, 0x39, 0xa7, 0xf4, 0xbc, 0xc4, 0xfb, 0x79,
|
||||||
0xc4, 0x24, 0xec, 0x3f, 0xea, 0x64, 0xc6, 0x6c, 0xbb, 0x65, 0xb9, 0xa1, 0x85, 0x3f, 0x7a, 0xd0,
|
0xc1, 0x90, 0x0b, 0xca, 0xf0, 0xfe, 0xd3, 0xbd, 0x33, 0x14, 0xc9, 0x5e, 0x2b, 0xd1, 0x06, 0x77,
|
||||||
0xff, 0x80, 0x42, 0x92, 0x00, 0x26, 0x09, 0x8a, 0x98, 0xa7, 0x85, 0x4c, 0x59, 0xee, 0x3b, 0x81,
|
0xde, 0xdf, 0x88, 0x4c, 0xe9, 0x72, 0x49, 0x2b, 0x03, 0xdb, 0xde, 0x08, 0x7b, 0x52, 0x23, 0x7b,
|
||||||
0x33, 0x1b, 0xd3, 0x26, 0x44, 0x42, 0x70, 0x37, 0x28, 0xfd, 0x5e, 0xe0, 0xcc, 0x26, 0xcf, 0x6f,
|
0xa6, 0x51, 0xd1, 0x14, 0xfc, 0xc7, 0xc8, 0xc5, 0xbc, 0x2e, 0x04, 0x92, 0xbb, 0xa0, 0x83, 0x09,
|
||||||
0xce, 0xf5, 0x40, 0xf3, 0x63, 0x94, 0x2a, 0xfd, 0xed, 0x0d, 0xaa, 0x82, 0xe4, 0x29, 0x78, 0x31,
|
0xad, 0xf1, 0x60, 0x12, 0xec, 0x07, 0x53, 0xb5, 0x9b, 0x4a, 0x40, 0xac, 0x35, 0xd1, 0x9f, 0x36,
|
||||||
0xc7, 0x48, 0xa2, 0xef, 0x6a, 0xda, 0x6d, 0x4b, 0x3b, 0xd4, 0xa0, 0x65, 0x5a, 0x8a, 0x2a, 0x28,
|
0x0c, 0xe5, 0x9e, 0x8c, 0x21, 0xc8, 0x90, 0xa7, 0xac, 0x58, 0x89, 0x82, 0x56, 0xa1, 0x35, 0xb6,
|
||||||
0x50, 0xfa, 0xfd, 0x56, 0xc1, 0xd3, 0xba, 0xa0, 0x30, 0x05, 0xcb, 0x22, 0x51, 0x05, 0x07, 0xad,
|
0x26, 0x7e, 0xdc, 0x15, 0x91, 0x08, 0x06, 0xe7, 0x28, 0x42, 0x7b, 0x6c, 0x4d, 0x82, 0xfd, 0x9b,
|
||||||
0x82, 0x1f, 0x35, 0x58, 0x15, 0x34, 0x14, 0xf2, 0x12, 0xa6, 0xe6, 0xb4, 0x2a, 0x22, 0xf9, 0x59,
|
0xc6, 0xd7, 0x09, 0x0a, 0x69, 0xfe, 0xe0, 0x46, 0x2c, 0x95, 0xe4, 0x1e, 0xb8, 0x29, 0xc3, 0x44,
|
||||||
0xf8, 0x9e, 0x4e, 0xb9, 0xdb, 0x4a, 0x39, 0x51, 0x11, 0x9b, 0x37, 0x29, 0x6b, 0x48, 0x75, 0x4a,
|
0x60, 0x38, 0x50, 0xb0, 0x37, 0x0c, 0xec, 0x50, 0x09, 0x0d, 0xd2, 0x40, 0xa4, 0x43, 0x8e, 0x22,
|
||||||
0x30, 0x43, 0x89, 0xfe, 0xb0, 0xd5, 0x69, 0xa9, 0xc1, 0xaa, 0x93, 0xa1, 0xbc, 0xf6, 0xa0, 0xaf,
|
0x1c, 0xf6, 0x1c, 0xce, 0x5b, 0x87, 0x5c, 0x3b, 0xac, 0x57, 0x99, 0x74, 0xe8, 0xf4, 0x1c, 0x7e,
|
||||||
0xa2, 0xa1, 0x80, 0xa1, 0x75, 0x80, 0x04, 0x30, 0x4d, 0x58, 0xbc, 0xe2, 0xb8, 0xd6, 0xdd, 0xad,
|
0xa3, 0x84, 0x8d, 0x43, 0x0d, 0x21, 0x9f, 0xc1, 0x96, 0x5e, 0x2d, 0x56, 0x89, 0xb8, 0xe0, 0xa1,
|
||||||
0x83, 0x90, 0xb0, 0x98, 0xe2, 0x5a, 0xb5, 0x20, 0x47, 0x30, 0xe4, 0xf8, 0xb5, 0x44, 0x51, 0x99,
|
0xab, 0x4c, 0x6e, 0xf7, 0x4c, 0x4e, 0xa5, 0xc6, 0xd8, 0x05, 0x75, 0x2b, 0x92, 0x27, 0x65, 0x58,
|
||||||
0xf8, 0x6c, 0x6e, 0x96, 0x34, 0xaf, 0x97, 0x67, 0x97, 0xa4, 0x7c, 0x5d, 0xb2, 0xb8, 0xdc, 0x62,
|
0xa2, 0xc0, 0x70, 0xd4, 0x3b, 0xe9, 0x48, 0x09, 0x9b, 0x93, 0x34, 0x84, 0x4c, 0xc0, 0x51, 0xac,
|
||||||
0x2e, 0xa9, 0xc9, 0xa1, 0x55, 0x72, 0xf8, 0xcd, 0x01, 0xa8, 0x0d, 0xdd, 0xa1, 0xf1, 0x7d, 0x18,
|
0x87, 0x9e, 0xc2, 0xde, 0x32, 0xd8, 0xaf, 0xa5, 0xcc, 0x40, 0x35, 0x60, 0xe6, 0xc2, 0x50, 0xea,
|
||||||
0x7f, 0x11, 0x2c, 0x5f, 0x25, 0x91, 0x8c, 0x74, 0xeb, 0x31, 0x1d, 0x29, 0x60, 0x19, 0xc9, 0x88,
|
0x22, 0x0e, 0x23, 0xc3, 0x15, 0x19, 0xc3, 0x56, 0x46, 0xd3, 0x05, 0xc3, 0x5c, 0xc5, 0x69, 0xb8,
|
||||||
0xbc, 0xaa, 0xa7, 0x32, 0x3b, 0x7b, 0xd2, 0x3d, 0xd5, 0x21, 0xdb, 0x6e, 0xd3, 0x7f, 0x06, 0x22,
|
0x86, 0x8c, 0xa6, 0x31, 0xe6, 0x32, 0x18, 0x72, 0x0c, 0x23, 0x86, 0x4f, 0x6a, 0xe4, 0x0d, 0xdd,
|
||||||
0xf7, 0x60, 0x94, 0x8a, 0x15, 0x72, 0xce, 0xb8, 0xde, 0xe6, 0x88, 0x0e, 0x53, 0xf1, 0x46, 0x7d,
|
0x1f, 0x4d, 0x75, 0xf5, 0xa7, 0x6d, 0xf3, 0x98, 0xea, 0xcb, 0x0a, 0x1c, 0xd1, 0xb4, 0x5e, 0x62,
|
||||||
0x86, 0x3f, 0x1d, 0x18, 0x9e, 0xee, 0xec, 0xd0, 0x0c, 0x3c, 0x66, 0xee, 0x9f, 0x31, 0x68, 0xaf,
|
0x25, 0x62, 0x6d, 0x13, 0x37, 0xc6, 0xd1, 0xcf, 0x16, 0x40, 0x4b, 0xfd, 0x35, 0x0e, 0x7e, 0x1b,
|
||||||
0xbe, 0x14, 0xef, 0x35, 0x4e, 0x6d, 0xbc, 0x2d, 0xc9, 0xed, 0x96, 0xd4, 0xbf, 0x04, 0x49, 0x83,
|
0xfc, 0x1f, 0x39, 0xad, 0x16, 0x59, 0x22, 0x12, 0x75, 0xb4, 0x1f, 0x7b, 0x52, 0x70, 0x94, 0x88,
|
||||||
0xb6, 0xa4, 0xdf, 0x0e, 0x40, 0x7d, 0xfd, 0x76, 0x50, 0xf5, 0x0e, 0xa6, 0x05, 0xc7, 0x98, 0xe5,
|
0x84, 0x1c, 0xb4, 0x51, 0xe9, 0xea, 0xee, 0x6e, 0x8e, 0xea, 0x90, 0x2e, 0x97, 0xc5, 0x73, 0x01,
|
||||||
0x49, 0xda, 0xd0, 0xf6, 0xb8, 0x7b, 0xa6, 0x93, 0x06, 0x9b, 0xb6, 0x72, 0xaf, 0x53, 0xf7, 0xf7,
|
0x91, 0xb7, 0xc0, 0x2b, 0xf8, 0x02, 0x19, 0xa3, 0x4c, 0xd5, 0xdd, 0x8b, 0x47, 0x05, 0xff, 0x52,
|
||||||
0x1e, 0xdc, 0xfa, 0xeb, 0x0d, 0x5d, 0xb1, 0xf8, 0x05, 0x4c, 0xd6, 0x29, 0x66, 0x89, 0x7d, 0xde,
|
0x6e, 0xa3, 0xdf, 0x2d, 0x18, 0xcd, 0xaf, 0xcd, 0xd0, 0x04, 0x5c, 0xaa, 0x3b, 0xd5, 0xee, 0x55,
|
||||||
0x6e, 0xe0, 0x36, 0xee, 0xc8, 0x91, 0x8a, 0xa8, 0x96, 0x14, 0xd6, 0xd5, 0x51, 0x90, 0x07, 0x30,
|
0x60, 0x8e, 0xe2, 0x91, 0x92, 0xc7, 0x46, 0xdf, 0x4f, 0x69, 0xb0, 0x39, 0xa5, 0xe1, 0x4b, 0x48,
|
||||||
0xd1, 0x7e, 0x9d, 0x47, 0x59, 0x89, 0xc2, 0xef, 0x07, 0xae, 0x9a, 0x4f, 0x41, 0x9f, 0x34, 0xd2,
|
0xc9, 0xe9, 0xa7, 0xf4, 0x97, 0x05, 0xd0, 0x36, 0xea, 0x35, 0xb2, 0xfa, 0x0a, 0xb6, 0x56, 0x0c,
|
||||||
0xf4, 0x6c, 0x70, 0x09, 0x9e, 0x79, 0x6d, 0xcf, 0x7e, 0x39, 0x00, 0xf5, 0x0f, 0xc8, 0x15, 0xdb,
|
0x53, 0x5a, 0x65, 0x45, 0x27, 0xb7, 0x9d, 0xcd, 0x31, 0x9d, 0x76, 0xd0, 0x71, 0xcf, 0xf6, 0xff,
|
||||||
0xf5, 0x7f, 0x5f, 0xf6, 0x31, 0x8c, 0x2f, 0x9e, 0x25, 0xd9, 0x03, 0x37, 0xca, 0x32, 0xad, 0x67,
|
0xcc, 0xfb, 0x37, 0x1b, 0x5e, 0xbf, 0x74, 0xdb, 0x5e, 0x71, 0xf2, 0x7b, 0x10, 0xe4, 0x05, 0x96,
|
||||||
0x44, 0xd5, 0x51, 0x3d, 0x65, 0xbd, 0x06, 0xe1, 0xf7, 0x3a, 0xd6, 0x64, 0xe3, 0xe1, 0x43, 0x18,
|
0x99, 0x19, 0x04, 0x03, 0x35, 0xff, 0x9a, 0x1e, 0x39, 0x96, 0x1a, 0x79, 0x64, 0x0c, 0x79, 0xb3,
|
||||||
0x5f, 0x80, 0xe4, 0x0e, 0x0c, 0x34, 0xec, 0x3b, 0x7a, 0x53, 0xe6, 0xe3, 0xcc, 0xd3, 0x7f, 0x56,
|
0xe4, 0xe4, 0x3d, 0x08, 0x14, 0x5f, 0x4f, 0x93, 0xb2, 0x46, 0x1e, 0x0e, 0xc7, 0x03, 0x19, 0x9f,
|
||||||
0x2f, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x8e, 0x38, 0xdd, 0x12, 0x07, 0x00, 0x00,
|
0x14, 0x7d, 0xab, 0x24, 0x5d, 0xce, 0x9c, 0x97, 0xc0, 0x99, 0xdb, 0xe7, 0xec, 0x0f, 0x0b, 0xa0,
|
||||||
|
0x1d, 0x35, 0xaf, 0x98, 0xae, 0xff, 0xf6, 0x66, 0x9f, 0x80, 0xbf, 0xbe, 0x96, 0xe4, 0x16, 0x0c,
|
||||||
|
0x92, 0xb2, 0x54, 0xf9, 0x78, 0xb1, 0x5c, 0xca, 0xab, 0xac, 0xca, 0xc0, 0x43, 0x7b, 0x43, 0x99,
|
||||||
|
0x8c, 0x3e, 0xfa, 0xd5, 0x02, 0x7f, 0x3d, 0x62, 0x65, 0x83, 0xa7, 0xb4, 0x2c, 0xbb, 0xfc, 0x78,
|
||||||
|
0x52, 0xa0, 0xd8, 0xd9, 0x85, 0x51, 0x5a, 0x26, 0x35, 0xc7, 0xc6, 0xeb, 0x6b, 0xcd, 0x4b, 0xa4,
|
||||||
|
0xa4, 0x71, 0xa3, 0x25, 0x5f, 0x34, 0x93, 0x5c, 0x27, 0xfe, 0xc1, 0xe6, 0xc4, 0xe7, 0x82, 0xd5,
|
||||||
|
0xa9, 0xa8, 0x19, 0x66, 0x2a, 0x06, 0x33, 0xe0, 0x5f, 0x94, 0xf8, 0xdf, 0x36, 0xb8, 0xfa, 0x3c,
|
||||||
|
0xb2, 0x0b, 0x2e, 0xc7, 0x12, 0x53, 0xa1, 0x22, 0x6d, 0xc3, 0x99, 0x2b, 0xa1, 0x7c, 0x59, 0xb4,
|
||||||
|
0x9a, 0x6c, 0x83, 0xf3, 0xd3, 0x05, 0x32, 0x34, 0xf5, 0xdc, 0x32, 0xb8, 0xef, 0xa4, 0x4c, 0xbe,
|
||||||
|
0x2a, 0x4a, 0x49, 0xee, 0x81, 0x47, 0x59, 0x86, 0x6c, 0x71, 0xd6, 0x04, 0xde, 0xbc, 0x9f, 0x8f,
|
||||||
|
0xa4, 0x78, 0xf6, 0xec, 0xc1, 0x8d, 0x78, 0x44, 0xf5, 0x92, 0x84, 0xe0, 0xd2, 0x3c, 0x6f, 0x9e,
|
||||||
|
0x5a, 0x47, 0x1e, 0xa6, 0xf7, 0xe4, 0x36, 0x38, 0x65, 0xb1, 0x2c, 0x74, 0x43, 0x4b, 0x85, 0xde,
|
||||||
|
0x92, 0x0f, 0xc1, 0xe3, 0x22, 0x61, 0x62, 0x91, 0x08, 0xf3, 0x88, 0xae, 0xe9, 0xab, 0x19, 0xa7,
|
||||||
|
0x4c, 0x7a, 0x57, 0x80, 0x03, 0x41, 0x3e, 0x86, 0xc0, 0x60, 0x73, 0x81, 0xcc, 0x3c, 0x9e, 0xcf,
|
||||||
|
0xc1, 0x41, 0xc3, 0x25, 0x84, 0xec, 0x80, 0x8b, 0x55, 0x26, 0x7d, 0x7b, 0x57, 0x83, 0x1d, 0xac,
|
||||||
|
0xb2, 0x03, 0x41, 0xa6, 0x00, 0x12, 0x77, 0x86, 0x39, 0x65, 0x18, 0xfa, 0x57, 0x63, 0x7d, 0xac,
|
||||||
|
0xb2, 0x99, 0x42, 0xcc, 0x3c, 0x70, 0x75, 0x55, 0xa3, 0x7d, 0x70, 0x35, 0xb1, 0x9d, 0xe6, 0xb2,
|
||||||
|
0xfe, 0xa5, 0xb9, 0xbe, 0x07, 0x47, 0x91, 0x4c, 0xb6, 0x61, 0xb8, 0x6e, 0xa9, 0xab, 0x0c, 0x94,
|
||||||
|
0x96, 0xdc, 0x04, 0x9b, 0xae, 0xcc, 0x13, 0x69, 0xd3, 0x15, 0x79, 0x17, 0xa0, 0x1d, 0x1f, 0x66,
|
||||||
|
0xde, 0xfa, 0xeb, 0xe9, 0x11, 0x3d, 0x84, 0x91, 0xa9, 0xcc, 0x35, 0xfd, 0xbf, 0x03, 0x7e, 0x56,
|
||||||
|
0x30, 0x4c, 0xd7, 0x77, 0xdb, 0x8f, 0x5b, 0x41, 0xf4, 0x03, 0xb8, 0x9a, 0x01, 0xf2, 0xa9, 0x1e,
|
||||||
|
0x14, 0xbc, 0x4a, 0x56, 0xfc, 0x82, 0x36, 0xed, 0x45, 0x9a, 0xcf, 0x0b, 0x4d, 0xe7, 0x46, 0x13,
|
||||||
|
0x07, 0x59, 0xbb, 0xb9, 0x3c, 0xed, 0xec, 0xcb, 0xd3, 0x2e, 0xfa, 0x1c, 0x82, 0x8e, 0x31, 0x21,
|
||||||
|
0x9d, 0xa0, 0x7d, 0x13, 0xe2, 0x8b, 0x3e, 0x0b, 0xd1, 0x5d, 0xf0, 0xd7, 0x29, 0x91, 0x37, 0xc1,
|
||||||
|
0x51, 0x2c, 0xab, 0x22, 0xf8, 0xb1, 0xde, 0xcc, 0x1e, 0xc2, 0x4e, 0x4a, 0x97, 0xcd, 0x85, 0x4b,
|
||||||
|
0x4b, 0x5a, 0x67, 0x9d, 0x6b, 0x97, 0xd2, 0x2a, 0xa7, 0x6c, 0x99, 0x54, 0x29, 0xfe, 0x62, 0x47,
|
||||||
|
0x27, 0x1a, 0x74, 0xa8, 0x40, 0xc7, 0x6b, 0xd0, 0x63, 0x95, 0xe5, 0xa9, 0xfc, 0xfa, 0x9e, 0xb9,
|
||||||
|
0xea, 0x07, 0xfc, 0xc9, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x96, 0x46, 0xb3, 0x8d, 0x0b,
|
||||||
|
0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
|
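
The generated getters above are nil-receiver-safe, which is why callers can chain them without nil checks. As a rough, hypothetical illustration (not part of this commit), a conformance-test Where clause pairs a field path with an operator and a JSON-encoded operand:

	w := &Where{
		Path:      &FieldPath{Field: []string{"a"}},
		Op:        "==",
		JsonValue: "7", // operands travel as JSON strings
	}
	fmt.Println(w.GetOp(), w.GetJsonValue()) // nil-safe getters print: == 7
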
73
vendor/cloud.google.com/go/firestore/integration_test.go
generated
vendored
@ -19,6 +19,7 @@ import (
	"flag"
	"fmt"
	"log"
	"math"
	"os"
	"sort"
	"testing"
@ -259,6 +260,7 @@ func TestIntegration_GetAll(t *testing.T) {
	for i := 0; i < 5; i++ {
		doc := coll.NewDoc()
		docRefs = append(docRefs, doc)
		// TODO(jba): omit one create so we can test missing doc behavior.
		mustCreate("GetAll #1", t, doc, getAll{N: i})
	}
	docSnapshots, err := iClient.GetAll(ctx, docRefs)
@ -277,6 +279,9 @@ func TestIntegration_GetAll(t *testing.T) {
		if got != want {
			t.Errorf("%d: got %+v, want %+v", i, got, want)
		}
		if ds.ReadTime.IsZero() {
			t.Errorf("%d: got zero read time", i)
		}
	}
}

@ -686,6 +691,38 @@ func TestIntegration_Query(t *testing.T) {
	}
}

// Test unary filters.
func TestIntegration_QueryUnary(t *testing.T) {
	ctx := context.Background()
	coll := integrationColl(t)
	mustCreate("q", t, coll.NewDoc(), map[string]interface{}{"x": 2, "q": "a"})
	mustCreate("q", t, coll.NewDoc(), map[string]interface{}{"x": 2, "q": nil})
	mustCreate("q", t, coll.NewDoc(), map[string]interface{}{"x": 2, "q": math.NaN()})
	wantNull := map[string]interface{}{"q": nil}
	wantNaN := map[string]interface{}{"q": math.NaN()}

	base := coll.Select("q").Where("x", "==", 2)
	for _, test := range []struct {
		q    Query
		want map[string]interface{}
	}{
		{base.Where("q", "==", nil), wantNull},
		{base.Where("q", "==", math.NaN()), wantNaN},
	} {
		got, err := test.q.Documents(ctx).GetAll()
		if err != nil {
			t.Fatal(err)
		}
		if len(got) != 1 {
			t.Errorf("got %d responses, want 1", len(got))
			continue
		}
		if g, w := got[0].Data(), test.want; !testEqual(g, w) {
			t.Errorf("%v: got %v, want %v", test.q, g, w)
		}
	}
}

// Test the special DocumentID field in queries.
func TestIntegration_QueryName(t *testing.T) {
	ctx := context.Background()
@ -783,6 +820,7 @@ func TestIntegration_RunTransaction(t *testing.T) {
		}
		return anError
	}

	mustCreate("RunTransaction", t, patDoc, pat)
	err := client.RunTransaction(ctx, incPat)
	if err != nil {
@ -813,6 +851,41 @@ func TestIntegration_RunTransaction(t *testing.T) {
	}
}

func TestIntegration_TransactionGetAll(t *testing.T) {
	ctx := context.Background()
	type Player struct {
		Name  string
		Score int
	}
	lee := Player{Name: "Lee", Score: 3}
	sam := Player{Name: "Sam", Score: 1}
	client := integrationClient(t)
	leeDoc := iColl.Doc("lee")
	samDoc := iColl.Doc("sam")
	mustCreate("TransactionGetAll", t, leeDoc, lee)
	mustCreate("TransactionGetAll", t, samDoc, sam)

	err := client.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
		docs, err := tx.GetAll([]*DocumentRef{samDoc, leeDoc})
		if err != nil {
			return err
		}
		for i, want := range []Player{sam, lee} {
			var got Player
			if err := docs[i].DataTo(&got); err != nil {
				return err
			}
			if !testutil.Equal(got, want) {
				return fmt.Errorf("got %+v, want %+v", got, want)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

func codeEq(t *testing.T, msg string, code codes.Code, err error) {
	if grpc.Code(err) != code {
		t.Fatalf("%s:\ngot <%v>\nwant code %s", msg, err, code)
3
vendor/cloud.google.com/go/firestore/internal/doc.template
generated
vendored
@ -21,6 +21,9 @@ database.
See https://cloud.google.com/firestore/docs for an introduction
to Cloud Firestore and additional help on using the Firestore API.

Note: you can't use both Cloud Firestore and Cloud Datastore in the same
project.

Creating a Client

To start working with this package, create a client with a project ID:
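
The client-creation step the template refers to boils down to something like this sketch of standard package usage ("my-project-id" is a placeholder, and the context import matches what this vendored code uses):

package main

import (
	"log"

	"cloud.google.com/go/firestore"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	// "my-project-id" is a placeholder; use your own Google Cloud project ID.
	client, err := firestore.NewClient(ctx, "my-project-id")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
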
33
vendor/cloud.google.com/go/firestore/mock_test.go
generated
vendored
@ -18,9 +18,12 @@ package firestore

import (
	"fmt"
	"strings"

	"cloud.google.com/go/internal/testutil"
	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/empty"
@ -54,7 +57,10 @@ func newMockServer() (*mockServer, error) {

// addRPC adds a (request, response) pair to the server's list of expected
// interactions. The server will compare the incoming request with wantReq
// using proto.Equal.
// using proto.Equal. The response can be a message or an error.
//
// For the Listen RPC, resp should be a []interface{}, where each element
// is either ListenResponse or an error.
//
// Passing nil for wantReq disables the request check.
func (s *mockServer) addRPC(wantReq proto.Message, resp interface{}) {
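
To make that contract concrete, a hypothetical Listen expectation would be queued roughly like this (illustrative test code, not part of this commit; srv is a *mockServer from newMockServer):

	srv.addRPC(nil, []interface{}{ // nil wantReq disables the request check
		&pb.ListenResponse{}, // streamed to the client via stream.Send
		status.Error(codes.Internal, "simulated stream failure"), // terminates the stream
	})
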
@ -174,3 +180,28 @@ func (s *mockServer) Rollback(_ context.Context, req *pb.RollbackRequest) (*empt
	}
	return res.(*empty.Empty), nil
}

func (s *mockServer) Listen(stream pb.Firestore_ListenServer) error {
	req, err := stream.Recv()
	if err != nil {
		return err
	}
	responses, err := s.popRPC(req)
	if err != nil {
		if status.Code(err) == codes.Unknown && strings.Contains(err.Error(), "mockServer") {
			// The stream will retry on Unknown, but we don't want that to happen if
			// the error comes from us.
			panic(err)
		}
		return err
	}
	for _, res := range responses.([]interface{}) {
		if err, ok := res.(error); ok {
			return err
		}
		if err := stream.Send(res.(*pb.ListenResponse)); err != nil {
			return err
		}
	}
	return nil
}
216
vendor/cloud.google.com/go/firestore/order.go
generated
vendored
Normal file
@ -0,0 +1,216 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestore

import (
	"bytes"
	"fmt"
	"math"
	"sort"
	"strings"

	tspb "github.com/golang/protobuf/ptypes/timestamp"
	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
)

// Returns a negative number, zero, or a positive number depending on whether a is
// less than, equal to, or greater than b according to Firestore's ordering of
// values.
func compareValues(a, b *pb.Value) int {
	ta := typeOrder(a)
	tb := typeOrder(b)
	if ta != tb {
		return compareInt64s(int64(ta), int64(tb))
	}
	switch a := a.ValueType.(type) {
	case *pb.Value_NullValue:
		return 0 // nulls are equal

	case *pb.Value_BooleanValue:
		av := a.BooleanValue
		bv := b.GetBooleanValue()
		switch {
		case av && !bv:
			return 1
		case bv && !av:
			return -1
		default:
			return 0
		}

	case *pb.Value_IntegerValue:
		return compareNumbers(float64(a.IntegerValue), toFloat(b))

	case *pb.Value_DoubleValue:
		return compareNumbers(a.DoubleValue, toFloat(b))

	case *pb.Value_TimestampValue:
		return compareTimestamps(a.TimestampValue, b.GetTimestampValue())

	case *pb.Value_StringValue:
		return strings.Compare(a.StringValue, b.GetStringValue())

	case *pb.Value_BytesValue:
		return bytes.Compare(a.BytesValue, b.GetBytesValue())

	case *pb.Value_ReferenceValue:
		return compareReferences(a.ReferenceValue, b.GetReferenceValue())

	case *pb.Value_GeoPointValue:
		ag := a.GeoPointValue
		bg := b.GetGeoPointValue()
		if ag.Latitude != bg.Latitude {
			return compareFloat64s(ag.Latitude, bg.Latitude)
		}
		return compareFloat64s(ag.Longitude, bg.Longitude)

	case *pb.Value_ArrayValue:
		return compareArrays(a.ArrayValue.Values, b.GetArrayValue().Values)

	case *pb.Value_MapValue:
		return compareMaps(a.MapValue.Fields, b.GetMapValue().Fields)

	default:
		panic(fmt.Sprintf("bad value type: %v", a))
	}
}
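
A minimal sketch of the ordering this yields, using the pb.Value oneof wrappers directly (illustrative values; compareValues is the unexported helper defined just above):

	null := &pb.Value{ValueType: &pb.Value_NullValue{}}
	one := &pb.Value{ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}
	half := &pb.Value{ValueType: &pb.Value_DoubleValue{DoubleValue: 1.5}}
	str := &pb.Value{ValueType: &pb.Value_StringValue{StringValue: "a"}}

	compareValues(null, one) // -1: null sorts before every number (typeOrder 0 vs 2)
	compareValues(one, half) // -1: integers and doubles share one numeric ordering
	compareValues(half, str) // -1: numbers sort before strings (typeOrder 2 vs 4)
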
// Treats NaN as less than any non-NaN.
func compareNumbers(a, b float64) int {
	switch {
	case math.IsNaN(a):
		if math.IsNaN(b) {
			return 0
		}
		return -1
	case math.IsNaN(b):
		return 1
	default:
		return compareFloat64s(a, b)
	}
}

// Return v as a float64, assuming it's an Integer or Double.
func toFloat(v *pb.Value) float64 {
	if x, ok := v.ValueType.(*pb.Value_IntegerValue); ok {
		return float64(x.IntegerValue)
	}
	return v.GetDoubleValue()
}

func compareTimestamps(a, b *tspb.Timestamp) int {
	if c := compareInt64s(a.Seconds, b.Seconds); c != 0 {
		return c
	}
	return compareInt64s(int64(a.Nanos), int64(b.Nanos))
}

func compareReferences(a, b string) int {
	// Compare path components lexicographically.
	pa := strings.Split(a, "/")
	pb := strings.Split(b, "/")
	for i := 0; i < len(pa) && i < len(pb); i++ {
		if c := strings.Compare(pa[i], pb[i]); c != 0 {
			return c
		}
	}
	return compareInt64s(int64(len(pa)), int64(len(pb)))
}

func compareArrays(a, b []*pb.Value) int {
	for i := 0; i < len(a) && i < len(b); i++ {
		if c := compareValues(a[i], b[i]); c != 0 {
			return c
		}
	}
	return compareInt64s(int64(len(a)), int64(len(b)))
}

func compareMaps(a, b map[string]*pb.Value) int {
	sortedKeys := func(m map[string]*pb.Value) []string {
		var ks []string
		for k := range m {
			ks = append(ks, k)
		}
		sort.Strings(ks)
		return ks
	}

	aks := sortedKeys(a)
	bks := sortedKeys(b)
	for i := 0; i < len(aks) && i < len(bks); i++ {
		if c := strings.Compare(aks[i], bks[i]); c != 0 {
			return c
		}
		k := aks[i]
		if c := compareValues(a[k], b[k]); c != 0 {
			return c
		}
	}
	return compareInt64s(int64(len(aks)), int64(len(bks)))
}

func compareFloat64s(a, b float64) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}

func compareInt64s(a, b int64) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}

// Return an integer corresponding to the type of value stored in v, such that
// comparing the resulting integers gives the Firestore ordering for types.
func typeOrder(v *pb.Value) int {
	switch v.ValueType.(type) {
	case *pb.Value_NullValue:
		return 0
	case *pb.Value_BooleanValue:
		return 1
	case *pb.Value_IntegerValue:
		return 2
	case *pb.Value_DoubleValue:
		return 2
	case *pb.Value_TimestampValue:
		return 3
	case *pb.Value_StringValue:
		return 4
	case *pb.Value_BytesValue:
		return 5
	case *pb.Value_ReferenceValue:
		return 6
	case *pb.Value_GeoPointValue:
		return 7
	case *pb.Value_ArrayValue:
		return 8
	case *pb.Value_MapValue:
		return 9
	default:
		panic(fmt.Sprintf("bad value type: %v", v))
	}
}
118
vendor/cloud.google.com/go/firestore/order_test.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestore

import (
	"math"
	"testing"
	"time"

	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
	"google.golang.org/genproto/googleapis/type/latlng"
)

func TestCompareValues(t *testing.T) {
	// Ordered list of values.
	vals := []*pb.Value{
		nullValue,
		boolval(false),
		boolval(true),
		floatval(math.NaN()),
		floatval(math.Inf(-1)),
		floatval(-math.MaxFloat64),
		int64val(math.MinInt64),
		floatval(-1.1),
		intval(-1),
		intval(0),
		floatval(math.SmallestNonzeroFloat64),
		intval(1),
		floatval(1.1),
		intval(2),
		int64val(math.MaxInt64),
		floatval(math.MaxFloat64),
		floatval(math.Inf(1)),
		tsval(time.Date(2016, 5, 20, 10, 20, 0, 0, time.UTC)),
		tsval(time.Date(2016, 10, 21, 15, 32, 0, 0, time.UTC)),
		strval(""),
		strval("\u0000\ud7ff\ue000\uffff"),
		strval("(╯°□°)╯︵ ┻━┻"),
		strval("a"),
		strval("abc def"),
		strval("e\u0301b"),
		strval("æ"),
		strval("\u00e9a"),
		bytesval([]byte{}),
		bytesval([]byte{0}),
		bytesval([]byte{0, 1, 2, 3, 4}),
		bytesval([]byte{0, 1, 2, 4, 3}),
		bytesval([]byte{255}),
		refval("projects/p1/databases/d1/documents/c1/doc1"),
		refval("projects/p1/databases/d1/documents/c1/doc2"),
		refval("projects/p1/databases/d1/documents/c1/doc2/c2/doc1"),
		refval("projects/p1/databases/d1/documents/c1/doc2/c2/doc2"),
		refval("projects/p1/databases/d1/documents/c10/doc1"),
		refval("projects/p1/databases/dkkkkklkjnjkkk1/documents/c2/doc1"),
		refval("projects/p2/databases/d2/documents/c1/doc1"),
		refval("projects/p2/databases/d2/documents/c1-/doc1"),
		geopoint(-90, -180),
		geopoint(-90, 0),
		geopoint(-90, 180),
		geopoint(0, -180),
		geopoint(0, 0),
		geopoint(0, 180),
		geopoint(1, -180),
		geopoint(1, 0),
		geopoint(1, 180),
		geopoint(90, -180),
		geopoint(90, 0),
		geopoint(90, 180),
		arrayval(),
		arrayval(strval("bar")),
		arrayval(strval("foo")),
		arrayval(strval("foo"), intval(1)),
		arrayval(strval("foo"), intval(2)),
		arrayval(strval("foo"), strval("0")),
		mapval(map[string]*pb.Value{"bar": intval(0)}),
		mapval(map[string]*pb.Value{"bar": intval(0), "foo": intval(1)}),
		mapval(map[string]*pb.Value{"foo": intval(1)}),
		mapval(map[string]*pb.Value{"foo": intval(2)}),
		mapval(map[string]*pb.Value{"foo": strval("0")}),
	}

	for i, v1 := range vals {
		if got := compareValues(v1, v1); got != 0 {
			t.Errorf("compare(%v, %v) == %d, want 0", v1, v1, got)
		}
		for _, v2 := range vals[i+1:] {
			if got := compareValues(v1, v2); got != -1 {
				t.Errorf("compare(%v, %v) == %d, want -1", v1, v2, got)
			}
			if got := compareValues(v2, v1); got != 1 {
				t.Errorf("compare(%v, %v) == %d, want 1", v1, v2, got)
			}
		}
	}

	// Integers and Doubles order the same.
	n1 := intval(17)
	n2 := floatval(17)
	if got := compareValues(n1, n2); got != 0 {
		t.Errorf("compare(%v, %v) == %d, want 0", n1, n2, got)
	}
}

func geopoint(lat, lng float64) *pb.Value {
	return geoval(&latlng.LatLng{Latitude: lat, Longitude: lng})
}
251
vendor/cloud.google.com/go/firestore/query.go
generated
vendored
@ -20,6 +20,7 @@ import (
	"io"
	"math"
	"reflect"
	"strings"

	"golang.org/x/net/context"

@ -43,10 +44,15 @@ type Query struct {
	offset                 int32
	limit                  *wrappers.Int32Value
	startVals, endVals     []interface{}
	startDoc, endDoc       *DocumentSnapshot
	startBefore, endBefore bool
	err                    error
}

func (q *Query) collectionPath() string {
	return q.parentPath + "/documents/" + q.collectionID
}

// DocumentID is the special field name representing the ID of a document
// in queries.
const DocumentID = "__name__"
@ -65,18 +71,17 @@ func (q Query) Select(paths ...string) Query {
		}
		fps = append(fps, fp)
	}
	if fps == nil {
		q.selection = []FieldPath{{DocumentID}}
	} else {
		q.selection = fps
	}
	return q
	return q.SelectPaths(fps...)
}

// SelectPaths returns a new Query that specifies the field paths
// to return from the result documents.
func (q Query) SelectPaths(fieldPaths ...FieldPath) Query {
	q.selection = fieldPaths
	if len(fieldPaths) == 0 {
		q.selection = []FieldPath{{DocumentID}}
	} else {
		q.selection = fieldPaths
	}
	return q
}

@ -128,7 +133,7 @@ func (q Query) OrderBy(path string, dir Direction) Query {
		q.err = err
		return q
	}
	q.orders = append(append([]order(nil), q.orders...), order{fp, dir})
	q.orders = append(q.copyOrders(), order{fp, dir})
	return q
}

@ -136,10 +141,14 @@ func (q Query) OrderBy(path string, dir Direction) Query {
// returned. A Query can have multiple OrderBy/OrderByPath specifications.
// OrderByPath appends the specification to the list of existing ones.
func (q Query) OrderByPath(fp FieldPath, dir Direction) Query {
	q.orders = append(append([]order(nil), q.orders...), order{fp, dir})
	q.orders = append(q.copyOrders(), order{fp, dir})
	return q
}

func (q *Query) copyOrders() []order {
	return append([]order(nil), q.orders...)
}

// Offset returns a new Query that specifies the number of initial results to skip.
// It must not be negative.
func (q Query) Offset(n int) Query {
@ -155,8 +164,13 @@ func (q Query) Limit(n int) Query {
}

// StartAt returns a new Query that specifies that results should start at
// the document with the given field values. The field path corresponding to
// each value is taken from the corresponding OrderBy call. For example, in
// the document with the given field values.
//
// If StartAt is called with a single DocumentSnapshot, its field values are used.
// The DocumentSnapshot must have all the fields mentioned in the OrderBy clauses.
//
// Otherwise, StartAt should be called with one field value for each OrderBy clause,
// in the order that they appear. For example, in
//	q.OrderBy("X", Asc).OrderBy("Y", Desc).StartAt(1, 2)
// results will begin at the first document where X = 1 and Y = 2.
//
@ -167,8 +181,9 @@ func (q Query) Limit(n int) Query {
//	client.Collection("States").OrderBy(DocumentID, firestore.Asc).StartAt("NewYork")
//
// Calling StartAt overrides a previous call to StartAt or StartAfter.
func (q Query) StartAt(fieldValues ...interface{}) Query {
	q.startVals, q.startBefore = fieldValues, true
func (q Query) StartAt(docSnapshotOrFieldValues ...interface{}) Query {
	q.startBefore = true
	q.startVals, q.startDoc, q.err = q.processCursorArg("StartAt", docSnapshotOrFieldValues)
	return q
}
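
A hedged usage sketch of the new snapshot form (collection and field names are made up): the last document of one page becomes the cursor for the next.

	q := client.Collection("States").OrderBy("pop", firestore.Desc).Limit(10)
	docs, err := q.Documents(ctx).GetAll()
	if err != nil {
		// handle the error
	}
	if len(docs) > 0 {
		// The snapshot must be the only argument; its "pop" field supplies the
		// cursor value for the OrderBy clause, plus an implicit __name__ tiebreak.
		nextPage := q.StartAfter(docs[len(docs)-1])
		_ = nextPage
	}
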
@ -176,8 +191,9 @@ func (q Query) StartAt(fieldValues ...interface{}) Query {
// the document with the given field values. See Query.StartAt for more information.
//
// Calling StartAfter overrides a previous call to StartAt or StartAfter.
func (q Query) StartAfter(fieldValues ...interface{}) Query {
	q.startVals, q.startBefore = fieldValues, false
func (q Query) StartAfter(docSnapshotOrFieldValues ...interface{}) Query {
	q.startBefore = false
	q.startVals, q.startDoc, q.err = q.processCursorArg("StartAfter", docSnapshotOrFieldValues)
	return q
}

@ -185,8 +201,9 @@ func (q Query) StartAfter(fieldValues ...interface{}) Query {
// document with the given field values. See Query.StartAt for more information.
//
// Calling EndAt overrides a previous call to EndAt or EndBefore.
func (q Query) EndAt(fieldValues ...interface{}) Query {
	q.endVals, q.endBefore = fieldValues, false
func (q Query) EndAt(docSnapshotOrFieldValues ...interface{}) Query {
	q.endBefore = false
	q.endVals, q.endDoc, q.err = q.processCursorArg("EndAt", docSnapshotOrFieldValues)
	return q
}

@ -194,11 +211,24 @@ func (q Query) EndAt(fieldValues ...interface{}) Query {
// the document with the given field values. See Query.StartAt for more information.
//
// Calling EndBefore overrides a previous call to EndAt or EndBefore.
func (q Query) EndBefore(fieldValues ...interface{}) Query {
	q.endVals, q.endBefore = fieldValues, true
func (q Query) EndBefore(docSnapshotOrFieldValues ...interface{}) Query {
	q.endBefore = true
	q.endVals, q.endDoc, q.err = q.processCursorArg("EndBefore", docSnapshotOrFieldValues)
	return q
}

func (q *Query) processCursorArg(name string, docSnapshotOrFieldValues []interface{}) ([]interface{}, *DocumentSnapshot, error) {
	for _, e := range docSnapshotOrFieldValues {
		if ds, ok := e.(*DocumentSnapshot); ok {
			if len(docSnapshotOrFieldValues) == 1 {
				return nil, ds, nil
			}
			return nil, nil, fmt.Errorf("firestore: a document snapshot must be the only argument to %s", name)
		}
	}
	return docSnapshotOrFieldValues, nil, nil
}

func (q Query) query() *Query { return &q }

func (q Query) toProto() (*pb.StructuredQuery, error) {
@ -245,33 +275,79 @@ func (q Query) toProto() (*pb.StructuredQuery, error) {
			cf.Filters = append(cf.Filters, pf)
		}
	}
	for _, ord := range q.orders {
	orders := q.orders
	if q.startDoc != nil || q.endDoc != nil {
		orders = q.adjustOrders()
	}
	for _, ord := range orders {
		po, err := ord.toProto()
		if err != nil {
			return nil, err
		}
		p.OrderBy = append(p.OrderBy, po)
	}
	// StartAt and EndAt must have values that correspond exactly to the explicit order-by fields.
	if len(q.startVals) != 0 {
		vals, err := q.toPositionValues(q.startVals)
		if err != nil {
			return nil, err
		}
		p.StartAt = &pb.Cursor{Values: vals, Before: q.startBefore}
	}
	if len(q.endVals) != 0 {
		vals, err := q.toPositionValues(q.endVals)
		if err != nil {
			return nil, err
		}
		p.EndAt = &pb.Cursor{Values: vals, Before: q.endBefore}
	}
	cursor, err := q.toCursor(q.startVals, q.startDoc, q.startBefore, orders)
	if err != nil {
		return nil, err
	}
	p.StartAt = cursor
	cursor, err = q.toCursor(q.endVals, q.endDoc, q.endBefore, orders)
	if err != nil {
		return nil, err
	}
	p.EndAt = cursor
	return p, nil
}

// If there is a start/end that uses a Document Snapshot, we may need to adjust the OrderBy
// clauses that the user provided: we add OrderBy(__name__) if it isn't already present, and
// we make sure we don't invalidate the original query by adding an OrderBy for inequality filters.
func (q *Query) adjustOrders() []order {
	// If the user is already ordering by document ID, don't change anything.
	for _, ord := range q.orders {
		if ord.isDocumentID() {
			return q.orders
		}
	}
	// If there are OrderBy clauses, append an OrderBy(DocumentID), using the direction of the last OrderBy clause.
	if len(q.orders) > 0 {
		return append(q.copyOrders(), order{
			fieldPath: FieldPath{DocumentID},
			dir:       q.orders[len(q.orders)-1].dir,
		})
	}
	// If there are no OrderBy clauses but there is an inequality, add an OrderBy clause
	// for the field of the first inequality.
	var orders []order
	for _, f := range q.filters {
		if f.op != "==" {
			orders = []order{{fieldPath: f.fieldPath, dir: Asc}}
			break
		}
	}
	// Add an ascending OrderBy(DocumentID).
	return append(orders, order{fieldPath: FieldPath{DocumentID}, dir: Asc})
}

func (q *Query) toCursor(fieldValues []interface{}, ds *DocumentSnapshot, before bool, orders []order) (*pb.Cursor, error) {
	var vals []*pb.Value
	var err error
	if ds != nil {
		vals, err = q.docSnapshotToCursorValues(ds, orders)
	} else if len(fieldValues) != 0 {
		vals, err = q.fieldValuesToCursorValues(fieldValues)
	} else {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &pb.Cursor{Values: vals, Before: before}, nil
}

// toPositionValues converts the field values to protos.
func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error) {
func (q *Query) fieldValuesToCursorValues(fieldValues []interface{}) ([]*pb.Value, error) {
	if len(fieldValues) != len(q.orders) {
		return nil, errors.New("firestore: number of field values in StartAt/StartAfter/EndAt/EndBefore does not match number of OrderBy fields")
	}
@ -279,12 +355,14 @@ func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error)
	var err error
	for i, ord := range q.orders {
		fval := fieldValues[i]
		if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID {
		if ord.isDocumentID() {
			// TODO(jba): support DocumentRefs as well as strings.
			// TODO(jba): error if document ref does not belong to the right collection.
			docID, ok := fval.(string)
			if !ok {
				return nil, fmt.Errorf("firestore: expected doc ID for DocumentID field, got %T", fval)
			}
			vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.parentPath + "/documents/" + q.collectionID + "/" + docID}}
			vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.collectionPath() + "/" + docID}}
		} else {
			var sawTransform bool
			vals[i], sawTransform, err = toProtoValue(reflect.ValueOf(fval))
@ -299,6 +377,62 @@ func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error)
	return vals, nil
}

func (q *Query) docSnapshotToCursorValues(ds *DocumentSnapshot, orders []order) ([]*pb.Value, error) {
	// TODO(jba): error if doc snap does not belong to the right collection.
	vals := make([]*pb.Value, len(orders))
	for i, ord := range orders {
		if ord.isDocumentID() {
			dp, qp := ds.Ref.Parent.Path, q.collectionPath()
			if dp != qp {
				return nil, fmt.Errorf("firestore: document snapshot for %s passed to query on %s", dp, qp)
			}
			vals[i] = &pb.Value{&pb.Value_ReferenceValue{ds.Ref.Path}}
		} else {
			val, err := valueAtPath(ord.fieldPath, ds.proto.Fields)
			if err != nil {
				return nil, err
			}
			vals[i] = val
		}
	}
	return vals, nil
}

// Returns a function that compares DocumentSnapshots according to q's ordering.
func (q Query) compareFunc() func(d1, d2 *DocumentSnapshot) (int, error) {
	// Add implicit sorting by name, using the last specified direction.
	lastDir := Asc
	if len(q.orders) > 0 {
		lastDir = q.orders[len(q.orders)-1].dir
	}
	orders := append(q.copyOrders(), order{[]string{DocumentID}, lastDir})
	return func(d1, d2 *DocumentSnapshot) (int, error) {
		for _, ord := range orders {
			var cmp int
			if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID {
				cmp = strings.Compare(d1.Ref.Path, d2.Ref.Path)
			} else {
				v1, err := valueAtPath(ord.fieldPath, d1.proto.Fields)
				if err != nil {
					return 0, err
				}
				v2, err := valueAtPath(ord.fieldPath, d2.proto.Fields)
				if err != nil {
					return 0, err
				}
				cmp = compareValues(v1, v2)
			}
			if cmp != 0 {
				if ord.dir == Desc {
					cmp = -cmp
				}
				return cmp, nil
			}
		}
		return 0, nil
	}
}

type filter struct {
	fieldPath FieldPath
	op        string
@ -309,6 +443,21 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
	if err := f.fieldPath.validate(); err != nil {
		return nil, err
	}
	if uop, ok := unaryOpFor(f.value); ok {
		if f.op != "==" {
			return nil, fmt.Errorf("firestore: must use '==' when comparing %v", f.value)
		}
		return &pb.StructuredQuery_Filter{
			FilterType: &pb.StructuredQuery_Filter_UnaryFilter{
				UnaryFilter: &pb.StructuredQuery_UnaryFilter{
					OperandType: &pb.StructuredQuery_UnaryFilter_Field{
						Field: fref(f.fieldPath),
					},
					Op: uop,
				},
			},
		}, nil
	}
	var op pb.StructuredQuery_FieldFilter_Operator
	switch f.op {
	case "<":
@ -333,7 +482,7 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
	}
	return &pb.StructuredQuery_Filter{
		FilterType: &pb.StructuredQuery_Filter_FieldFilter{
			&pb.StructuredQuery_FieldFilter{
			FieldFilter: &pb.StructuredQuery_FieldFilter{
				Field: fref(f.fieldPath),
				Op:    op,
				Value: val,
@ -342,11 +491,37 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
	}, nil
}

func unaryOpFor(value interface{}) (pb.StructuredQuery_UnaryFilter_Operator, bool) {
	switch {
	case value == nil:
		return pb.StructuredQuery_UnaryFilter_IS_NULL, true
	case isNaN(value):
		return pb.StructuredQuery_UnaryFilter_IS_NAN, true
	default:
		return pb.StructuredQuery_UnaryFilter_OPERATOR_UNSPECIFIED, false
	}
}

func isNaN(x interface{}) bool {
	switch x := x.(type) {
	case float32:
		return math.IsNaN(float64(x))
	case float64:
		return math.IsNaN(x)
	default:
		return false
	}
}
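
In caller terms, the unary path means roughly the following (a hedged sketch with made-up collection and field names):

	q1 := coll.Where("score", "==", nil)        // compiled to an IS_NULL unary filter
	q2 := coll.Where("score", "==", math.NaN()) // compiled to an IS_NAN unary filter
	// Any operator other than "==" with nil or NaN fails in toProto when the query runs:
	q3 := coll.Where("score", ">", math.NaN())
	_, _, _ = q1, q2, q3
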
|
||||||
|
|
||||||
type order struct {
|
type order struct {
|
||||||
fieldPath FieldPath
|
fieldPath FieldPath
|
||||||
dir Direction
|
dir Direction
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r order) isDocumentID() bool {
|
||||||
|
return len(r.fieldPath) == 1 && r.fieldPath[0] == DocumentID
|
||||||
|
}
|
||||||
|
|
||||||
func (r order) toProto() (*pb.StructuredQuery_Order, error) {
|
func (r order) toProto() (*pb.StructuredQuery_Order, error) {
|
||||||
if err := r.fieldPath.validate(); err != nil {
|
if err := r.fieldPath.validate(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -433,7 +608,7 @@ func (it *DocumentIterator) Next() (*DocumentSnapshot, error) {
|
||||||
it.err = err
|
it.err = err
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
doc, err := newDocumentSnapshot(docRef, res.Document, client)
|
doc, err := newDocumentSnapshot(docRef, res.Document, client, res.ReadTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
it.err = err
|
it.err = err
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
410
vendor/cloud.google.com/go/firestore/query_test.go
generated
vendored
410
vendor/cloud.google.com/go/firestore/query_test.go
generated
vendored
|
@ -15,6 +15,8 @@
|
||||||
package firestore
|
package firestore
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
@@ -22,34 +24,87 @@ import (
	"cloud.google.com/go/internal/pretty"
	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"

+	tspb "github.com/golang/protobuf/ptypes/timestamp"
	"github.com/golang/protobuf/ptypes/wrappers"
)

+func TestFilterToProto(t *testing.T) {
+	for _, test := range []struct {
+		in   filter
+		want *pb.StructuredQuery_Filter
+	}{
+		{
+			filter{[]string{"a"}, ">", 1},
+			&pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_FieldFilter{
+				FieldFilter: &pb.StructuredQuery_FieldFilter{
+					Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"},
+					Op:    pb.StructuredQuery_FieldFilter_GREATER_THAN,
+					Value: intval(1),
+				},
+			}},
+		},
+		{
+			filter{[]string{"a"}, "==", nil},
+			&pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_UnaryFilter{
+				UnaryFilter: &pb.StructuredQuery_UnaryFilter{
+					OperandType: &pb.StructuredQuery_UnaryFilter_Field{
+						Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"},
+					},
+					Op: pb.StructuredQuery_UnaryFilter_IS_NULL,
+				},
+			}},
+		},
+		{
+			filter{[]string{"a"}, "==", math.NaN()},
+			&pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_UnaryFilter{
+				UnaryFilter: &pb.StructuredQuery_UnaryFilter{
+					OperandType: &pb.StructuredQuery_UnaryFilter_Field{
+						Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"},
+					},
+					Op: pb.StructuredQuery_UnaryFilter_IS_NAN,
+				},
+			}},
+		},
+	} {
+		got, err := test.in.toProto()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !testEqual(got, test.want) {
+			t.Errorf("%+v:\ngot\n%v\nwant\n%v", test.in, pretty.Value(got), pretty.Value(test.want))
+		}
+	}
+}
+
func TestQueryToProto(t *testing.T) {
-	c := &Client{}
+	filtr := func(path []string, op string, val interface{}) *pb.StructuredQuery_Filter {
+		f, err := filter{path, op, val}.toProto()
+		if err != nil {
+			t.Fatal(err)
+		}
+		return f
+	}
+
+	c := &Client{projectID: "P", databaseID: "DB"}
	coll := c.Collection("C")
	q := coll.Query
-	aFilter, err := filter{[]string{"a"}, ">", 5}.toProto()
-	if err != nil {
-		t.Fatal(err)
-	}
-	bFilter, err := filter{[]string{"b"}, "<", "foo"}.toProto()
-	if err != nil {
-		t.Fatal(err)
-	}
-	slashStarFilter, err := filter{[]string{"/", "*"}, ">", 5}.toProto()
-	if err != nil {
-		t.Fatal(err)
-	}
	type S struct {
		A int `firestore:"a"`
	}
+	docsnap := &DocumentSnapshot{
+		Ref: coll.Doc("D"),
+		proto: &pb.Document{
+			Fields: map[string]*pb.Value{"a": intval(7), "b": intval(8)},
+		},
+	}
	for _, test := range []struct {
+		desc string
		in   Query
		want *pb.StructuredQuery
	}{
		{
-			in: q.Select(),
+			desc: "q.Select()",
+			in:   q.Select(),
			want: &pb.StructuredQuery{
				Select: &pb.StructuredQuery_Projection{
					Fields: []*pb.StructuredQuery_FieldReference{fref1("__name__")},

@@ -57,7 +112,8 @@ func TestQueryToProto(t *testing.T) {
			},
		},
		{
-			in: q.Select("a", "b"),
+			desc: `q.Select("a", "b")`,
+			in:   q.Select("a", "b"),
			want: &pb.StructuredQuery{
				Select: &pb.StructuredQuery_Projection{
					Fields: []*pb.StructuredQuery_FieldReference{fref1("a"), fref1("b")},

@@ -65,7 +121,8 @@ func TestQueryToProto(t *testing.T) {
			},
		},
		{
-			in: q.Select("a", "b").Select("c"), // last wins
+			desc: `q.Select("a", "b").Select("c")`,
+			in:   q.Select("a", "b").Select("c"), // last wins
			want: &pb.StructuredQuery{
				Select: &pb.StructuredQuery_Projection{
					Fields: []*pb.StructuredQuery_FieldReference{fref1("c")},

@@ -73,7 +130,8 @@ func TestQueryToProto(t *testing.T) {
			},
		},
		{
-			in: q.SelectPaths([]string{"*"}, []string{"/"}),
+			desc: `q.SelectPaths([]string{"*"}, []string{"/"})`,
+			in:   q.SelectPaths([]string{"*"}, []string{"/"}),
			want: &pb.StructuredQuery{
				Select: &pb.StructuredQuery_Projection{
					Fields: []*pb.StructuredQuery_FieldReference{fref1("*"), fref1("/")},

@@ -81,18 +139,35 @@ func TestQueryToProto(t *testing.T) {
			},
		},
		{
+			desc: `q.Where("a", ">", 5)`,
			in: q.Where("a", ">", 5),
-			want: &pb.StructuredQuery{Where: aFilter},
+			want: &pb.StructuredQuery{Where: filtr([]string{"a"}, ">", 5)},
		},
		{
-			in: q.Where("a", ">", 5).Where("b", "<", "foo"),
+			desc: `q.Where("a", "==", nil)`,
+			in:   q.Where("a", "==", nil),
+			want: &pb.StructuredQuery{Where: filtr([]string{"a"}, "==", nil)},
+		},
+		{
+			desc: `q.Where("a", "==", NaN)`,
+			in:   q.Where("a", "==", math.NaN()),
+			want: &pb.StructuredQuery{Where: filtr([]string{"a"}, "==", math.NaN())},
+		},
+		{
+			desc: `q.Where("a", "==", NaN)`,
+			in:   q.Where("a", "==", float32(math.NaN())),
+			want: &pb.StructuredQuery{Where: filtr([]string{"a"}, "==", math.NaN())},
+		},
+		{
+			desc: `q.Where("a", ">", 5).Where("b", "<", "foo")`,
+			in:   q.Where("a", ">", 5).Where("b", "<", "foo"),
			want: &pb.StructuredQuery{
				Where: &pb.StructuredQuery_Filter{
					&pb.StructuredQuery_Filter_CompositeFilter{
						&pb.StructuredQuery_CompositeFilter{
							Op: pb.StructuredQuery_CompositeFilter_AND,
							Filters: []*pb.StructuredQuery_Filter{
-								aFilter, bFilter,
+								filtr([]string{"a"}, ">", 5), filtr([]string{"b"}, "<", "foo"),
							},
						},
					},
				},

@@ -100,11 +175,13 @@ func TestQueryToProto(t *testing.T) {
			},
		},
		{
+			desc: ` q.WherePath([]string{"/", "*"}, ">", 5)`,
			in: q.WherePath([]string{"/", "*"}, ">", 5),
-			want: &pb.StructuredQuery{Where: slashStarFilter},
+			want: &pb.StructuredQuery{Where: filtr([]string{"/", "*"}, ">", 5)},
		},
		{
-			in: q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc),
+			desc: `q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc)`,
+			in:   q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc),
			want: &pb.StructuredQuery{
				OrderBy: []*pb.StructuredQuery_Order{
					{fref1("b"), pb.StructuredQuery_ASCENDING},

@@ -114,21 +191,24 @@ func TestQueryToProto(t *testing.T) {
			},
		},
		{
-			in: q.Offset(2).Limit(3),
+			desc: `q.Offset(2).Limit(3)`,
+			in:   q.Offset(2).Limit(3),
			want: &pb.StructuredQuery{
				Offset: 2,
				Limit:  &wrappers.Int32Value{3},
			},
		},
		{
-			in: q.Offset(2).Limit(3).Limit(4).Offset(5), // last wins
+			desc: `q.Offset(2).Limit(3).Limit(4).Offset(5)`,
+			in:   q.Offset(2).Limit(3).Limit(4).Offset(5), // last wins
			want: &pb.StructuredQuery{
				Offset: 5,
				Limit:  &wrappers.Int32Value{4},
			},
		},
		{
-			in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9),
+			desc: `q.OrderBy("a", Asc).StartAt(7).EndBefore(9)`,
+			in:   q.OrderBy("a", Asc).StartAt(7).EndBefore(9),
			want: &pb.StructuredQuery{
				OrderBy: []*pb.StructuredQuery_Order{
					{fref1("a"), pb.StructuredQuery_ASCENDING},

@@ -144,7 +224,8 @@ func TestQueryToProto(t *testing.T) {
			},
		},
		{
-			in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9),
+			desc: `q.OrderBy("a", Asc).StartAt(7).EndAt(9)`,
+			in:   q.OrderBy("a", Asc).StartAt(7).EndAt(9),
			want: &pb.StructuredQuery{
				OrderBy: []*pb.StructuredQuery_Order{
					{fref1("a"), pb.StructuredQuery_ASCENDING},

@@ -155,12 +236,13 @@ func TestQueryToProto(t *testing.T) {
				},
				EndAt: &pb.Cursor{
					Values: []*pb.Value{intval(9)},
-					Before: true,
+					Before: false,
				},
			},
		},
		{
-			in: q.OrderBy("a", Asc).StartAfter(7).EndAt(9),
+			desc: `q.OrderBy("a", Asc).StartAfter(7).EndAt(9)`,
+			in:   q.OrderBy("a", Asc).StartAfter(7).EndAt(9),
			want: &pb.StructuredQuery{
				OrderBy: []*pb.StructuredQuery_Order{
					{fref1("a"), pb.StructuredQuery_ASCENDING},

@@ -176,7 +258,25 @@ func TestQueryToProto(t *testing.T) {
			},
		},
		{
-			in: q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10),
+			desc: `q.OrderBy(DocumentID, Asc).StartAfter("foo").EndBefore("bar")`,
+			in:   q.OrderBy(DocumentID, Asc).StartAfter("foo").EndBefore("bar"),
+			want: &pb.StructuredQuery{
+				OrderBy: []*pb.StructuredQuery_Order{
+					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+				},
+				StartAt: &pb.Cursor{
+					Values: []*pb.Value{refval(coll.parentPath + "/documents/C/foo")},
+					Before: false,
+				},
+				EndAt: &pb.Cursor{
+					Values: []*pb.Value{refval(coll.parentPath + "/documents/C/bar")},
+					Before: true,
+				},
+			},
+		},
+		{
+			desc: `q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10)`,
+			in:   q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10),
			want: &pb.StructuredQuery{
				OrderBy: []*pb.StructuredQuery_Order{
					{fref1("a"), pb.StructuredQuery_ASCENDING},

@@ -194,6 +294,7 @@ func TestQueryToProto(t *testing.T) {
		},
		{
			// last of StartAt/After wins, same for End
+			desc: `q.OrderBy("a", Asc).StartAfter(1).StartAt(2).EndAt(3).EndBefore(4)`,
			in: q.OrderBy("a", Asc).
				StartAfter(1).StartAt(2).
				EndAt(3).EndBefore(4),

@@ -211,14 +312,128 @@ func TestQueryToProto(t *testing.T) {
				},
			},
		},
+		// Start/End with DocumentSnapshot
+		// These tests are from the "Document Snapshot Cursors" doc.
+		{
+			desc: `q.StartAt(docsnap)`,
+			in:   q.StartAt(docsnap),
+			want: &pb.StructuredQuery{
+				OrderBy: []*pb.StructuredQuery_Order{
+					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+				},
+				StartAt: &pb.Cursor{
+					Values: []*pb.Value{refval(coll.parentPath + "/documents/C/D")},
+					Before: true,
+				},
+			},
+		},
+		{
+			desc: `q.OrderBy("a", Asc).StartAt(docsnap)`,
+			in:   q.OrderBy("a", Asc).StartAt(docsnap),
+			want: &pb.StructuredQuery{
+				OrderBy: []*pb.StructuredQuery_Order{
+					{fref1("a"), pb.StructuredQuery_ASCENDING},
+					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+				},
+				StartAt: &pb.Cursor{
+					Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")},
+					Before: true,
+				},
+			},
+		},
+		{
+			desc: `q.OrderBy("a", Desc).StartAt(docsnap)`,
+			in:   q.OrderBy("a", Desc).StartAt(docsnap),
+			want: &pb.StructuredQuery{
+				OrderBy: []*pb.StructuredQuery_Order{
+					{fref1("a"), pb.StructuredQuery_DESCENDING},
+					{fref1("__name__"), pb.StructuredQuery_DESCENDING},
+				},
+				StartAt: &pb.Cursor{
+					Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")},
+					Before: true,
+				},
+			},
+		},
+		{
+			desc: `q.OrderBy("a", Desc).OrderBy("b", Asc).StartAt(docsnap)`,
+			in:   q.OrderBy("a", Desc).OrderBy("b", Asc).StartAt(docsnap),
+			want: &pb.StructuredQuery{
+				OrderBy: []*pb.StructuredQuery_Order{
+					{fref1("a"), pb.StructuredQuery_DESCENDING},
+					{fref1("b"), pb.StructuredQuery_ASCENDING},
+					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+				},
+				StartAt: &pb.Cursor{
+					Values: []*pb.Value{intval(7), intval(8), refval(coll.parentPath + "/documents/C/D")},
+					Before: true,
+				},
+			},
+		},
+		{
+			desc: `q.Where("a", "==", 3).StartAt(docsnap)`,
+			in:   q.Where("a", "==", 3).StartAt(docsnap),
+			want: &pb.StructuredQuery{
+				Where: filtr([]string{"a"}, "==", 3),
+				OrderBy: []*pb.StructuredQuery_Order{
+					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+				},
+				StartAt: &pb.Cursor{
+					Values: []*pb.Value{refval(coll.parentPath + "/documents/C/D")},
+					Before: true,
+				},
+			},
+		},
+		{
+			desc: `q.Where("a", "<", 3).StartAt(docsnap)`,
+			in:   q.Where("a", "<", 3).StartAt(docsnap),
+			want: &pb.StructuredQuery{
+				Where: filtr([]string{"a"}, "<", 3),
+				OrderBy: []*pb.StructuredQuery_Order{
+					{fref1("a"), pb.StructuredQuery_ASCENDING},
+					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+				},
+				StartAt: &pb.Cursor{
+					Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")},
+					Before: true,
+				},
+			},
+		},
+		{
+			desc: `q.Where("b", "==", 1).Where("a", "<", 3).StartAt(docsnap)`,
+			in:   q.Where("b", "==", 1).Where("a", "<", 3).StartAt(docsnap),
+			want: &pb.StructuredQuery{
+				Where: &pb.StructuredQuery_Filter{
+					&pb.StructuredQuery_Filter_CompositeFilter{
+						&pb.StructuredQuery_CompositeFilter{
+							Op: pb.StructuredQuery_CompositeFilter_AND,
+							Filters: []*pb.StructuredQuery_Filter{
+								filtr([]string{"b"}, "==", 1),
+								filtr([]string{"a"}, "<", 3),
+							},
+						},
+					},
+				},
+				OrderBy: []*pb.StructuredQuery_Order{
+					{fref1("a"), pb.StructuredQuery_ASCENDING},
+					{fref1("__name__"), pb.StructuredQuery_ASCENDING},
+				},
+				StartAt: &pb.Cursor{
+					Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")},
+					Before: true,
+				},
+			},
+		},
	} {
		got, err := test.in.toProto()
		if err != nil {
-			t.Fatalf("%+v: %v", test.in, err)
+			t.Errorf("%s: %v", test.desc, err)
+			continue
		}
		test.want.From = []*pb.StructuredQuery_CollectionSelector{{CollectionId: "C"}}
		if !testEqual(got, test.want) {
-			t.Errorf("%+v: got\n%v\nwant\n%v", test.in, pretty.Value(got), pretty.Value(test.want))
+			t.Errorf("%s:\ngot\n%v\nwant\n%v", test.desc, pretty.Value(got), pretty.Value(test.want))
		}
	}
}

@@ -230,7 +445,15 @@ func fref1(s string) *pb.StructuredQuery_FieldReference {
func TestQueryToProtoErrors(t *testing.T) {
	st := map[string]interface{}{"a": ServerTimestamp}
	del := map[string]interface{}{"a": Delete}
-	q := (&Client{}).Collection("C").Query
+	c := &Client{projectID: "P", databaseID: "DB"}
+	coll := c.Collection("C")
+	docsnap := &DocumentSnapshot{
+		Ref: coll.Doc("D"),
+		proto: &pb.Document{
+			Fields: map[string]*pb.Value{"a": intval(7)},
+		},
+	}
+	q := coll.Query
	for _, query := range []Query{
		Query{},               // no collection ID
		q.Where("x", "!=", 1), // invalid operator

@@ -248,6 +471,11 @@ func TestQueryToProtoErrors(t *testing.T) {
		q.Where("x", "==", del),          // Delete in filter
		q.OrderBy("a", Asc).StartAt(del), // Delete in Start
		q.OrderBy("a", Asc).EndAt(del),   // Delete in End
+		q.OrderBy(DocumentID, Asc).StartAt(7), // wrong type for __name__
+		q.OrderBy(DocumentID, Asc).EndAt(7),   // wrong type for __name__
+		q.OrderBy("b", Asc).StartAt(docsnap),  // doc snapshot does not have order-by field
+		q.StartAt(docsnap).EndAt("x"),         // mixed doc snapshot and fields
+		q.StartAfter("x").EndBefore(docsnap),  // mixed doc snapshot and fields
	} {
		_, err := query.toProto()
		if err == nil {

@@ -362,10 +590,10 @@ func TestQueryGetAll(t *testing.T) {
			Fields: map[string]*pb.Value{"f": intval(1)},
		},
	}
+	wantReadTimes := []*tspb.Timestamp{aTimestamp, aTimestamp2}
	srv.addRPC(nil, []interface{}{
-		&pb.RunQueryResponse{Document: wantPBDocs[0]},
+		&pb.RunQueryResponse{Document: wantPBDocs[0], ReadTime: aTimestamp},
-		&pb.RunQueryResponse{Document: wantPBDocs[1]},
+		&pb.RunQueryResponse{Document: wantPBDocs[1], ReadTime: aTimestamp2},
	})
	gotDocs, err := c.Collection("C").Documents(ctx).GetAll()
	if err != nil {

@@ -375,7 +603,7 @@ func TestQueryGetAll(t *testing.T) {
		t.Errorf("got %d docs, wanted %d", got, want)
	}
	for i, got := range gotDocs {
-		want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c)
+		want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c, wantReadTimes[i])
		if err != nil {
			t.Fatal(err)
		}

@@ -387,3 +615,113 @@ func TestQueryGetAll(t *testing.T) {
		}
	}
}
+
+func TestQueryCompareFunc(t *testing.T) {
+	mv := func(fields ...interface{}) map[string]*pb.Value {
+		m := map[string]*pb.Value{}
+		for i := 0; i < len(fields); i += 2 {
+			m[fields[i].(string)] = fields[i+1].(*pb.Value)
+		}
+		return m
+	}
+	snap := func(ref *DocumentRef, fields map[string]*pb.Value) *DocumentSnapshot {
+		return &DocumentSnapshot{Ref: ref, proto: &pb.Document{Fields: fields}}
+	}
+
+	c := &Client{}
+	coll := c.Collection("C")
+	doc1 := coll.Doc("doc1")
+	doc2 := coll.Doc("doc2")
+	doc3 := coll.Doc("doc3")
+	doc4 := coll.Doc("doc4")
+	for _, test := range []struct {
+		q    Query
+		in   []*DocumentSnapshot
+		want []*DocumentSnapshot
+	}{
+		{
+			q: coll.OrderBy("foo", Asc),
+			in: []*DocumentSnapshot{
+				snap(doc3, mv("foo", intval(2))),
+				snap(doc4, mv("foo", intval(1))),
+				snap(doc2, mv("foo", intval(2))),
+			},
+			want: []*DocumentSnapshot{
+				snap(doc4, mv("foo", intval(1))),
+				snap(doc2, mv("foo", intval(2))),
+				snap(doc3, mv("foo", intval(2))),
+			},
+		},
+		{
+			q: coll.OrderBy("foo", Desc),
+			in: []*DocumentSnapshot{
+				snap(doc3, mv("foo", intval(2))),
+				snap(doc4, mv("foo", intval(1))),
+				snap(doc2, mv("foo", intval(2))),
+			},
+			want: []*DocumentSnapshot{
+				snap(doc3, mv("foo", intval(2))),
+				snap(doc2, mv("foo", intval(2))),
+				snap(doc4, mv("foo", intval(1))),
+			},
+		},
+		{
+			q: coll.OrderBy("foo.bar", Asc),
+			in: []*DocumentSnapshot{
+				snap(doc1, mv("foo", mapval(mv("bar", intval(1))))),
+				snap(doc2, mv("foo", mapval(mv("bar", intval(2))))),
+				snap(doc3, mv("foo", mapval(mv("bar", intval(2))))),
+			},
+			want: []*DocumentSnapshot{
+				snap(doc1, mv("foo", mapval(mv("bar", intval(1))))),
+				snap(doc2, mv("foo", mapval(mv("bar", intval(2))))),
+				snap(doc3, mv("foo", mapval(mv("bar", intval(2))))),
+			},
+		},
+		{
+			q: coll.OrderBy("foo.bar", Desc),
+			in: []*DocumentSnapshot{
+				snap(doc1, mv("foo", mapval(mv("bar", intval(1))))),
+				snap(doc2, mv("foo", mapval(mv("bar", intval(2))))),
+				snap(doc3, mv("foo", mapval(mv("bar", intval(2))))),
+			},
+			want: []*DocumentSnapshot{
+				snap(doc3, mv("foo", mapval(mv("bar", intval(2))))),
+				snap(doc2, mv("foo", mapval(mv("bar", intval(2))))),
+				snap(doc1, mv("foo", mapval(mv("bar", intval(1))))),
+			},
+		},
+	} {
+		got := append([]*DocumentSnapshot(nil), test.in...)
+		sort.Sort(byQuery{test.q.compareFunc(), got})
+		if diff := testDiff(got, test.want); diff != "" {
+			t.Errorf("%+v: %s", test.q, diff)
+		}
+	}
+
+	// Want error on missing field.
+	q := coll.OrderBy("bar", Asc)
+	if q.err != nil {
+		t.Fatalf("bad query: %v", q.err)
+	}
+	cf := q.compareFunc()
+	s := snap(doc1, mv("foo", intval(1)))
+	if _, err := cf(s, s); err == nil {
+		t.Error("got nil, want error")
+	}
+}
+
+type byQuery struct {
+	compare func(d1, d2 *DocumentSnapshot) (int, error)
+	docs    []*DocumentSnapshot
+}
+
+func (b byQuery) Len() int      { return len(b.docs) }
+func (b byQuery) Swap(i, j int) { b.docs[i], b.docs[j] = b.docs[j], b.docs[i] }
+func (b byQuery) Less(i, j int) bool {
+	c, err := b.compare(b.docs[i], b.docs[j])
+	if err != nil {
+		panic(err)
+	}
+	return c < 0
+}

11 vendor/cloud.google.com/go/firestore/testdata/Makefile generated vendored
@@ -1,11 +0,0 @@
-# Copy textproto files in this directory from the source of truth.
-
-SRC=$(GOPATH)/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata
-
-.PHONY: refresh
-
-refresh:
-	-rm *.textproto
-	cp $(SRC)/*.textproto .
-	openssl dgst -sha1 $(SRC)/tests.binprotos > VERSION

2 vendor/cloud.google.com/go/firestore/testdata/VERSION generated vendored
@@ -1 +1 @@
-SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/tests.binprotos)= b0fbaaac8664945cb4f5667da092a6f9ededc57e
+SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/test-suite.binproto)= 3047565564b81726a57d7db719704ea8bf17a9ab

68 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto generated vendored Normal file
@@ -0,0 +1,68 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# When a document snapshot is used, the client appends a __name__ order-by clause
# with the direction of the last order-by clause.

description: "query: cursor methods with a document snapshot, existing orderBy"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "a"
      >
      direction: "asc"
    >
  >
  clauses: <
    order_by: <
      path: <
        field: "b"
      >
      direction: "desc"
    >
  >
  clauses: <
    start_after: <
      doc_snapshot: <
        path: "projects/projectID/databases/(default)/documents/C/D"
        json_data: "{\"a\": 7, \"b\": 8}"
      >
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  order_by: <
    field: <
      field_path: "a"
    >
    direction: ASCENDING
  >
  order_by: <
    field: <
      field_path: "b"
    >
    direction: DESCENDING
  >
  order_by: <
    field: <
      field_path: "__name__"
    >
    direction: DESCENDING
  >
  start_at: <
    values: <
      integer_value: 7
    >
    values: <
      integer_value: 8
    >
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D"
    >
  >
>
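As a reading aid (not part of this change): a compile-only Go sketch of the query the fixture above encodes, written against the package's public API; the function name and its coll/snap parameters are illustrative assumptions.

package example

import "cloud.google.com/go/firestore"

// snapshotCursorWithOrderBy: passing a document snapshot to a cursor method
// makes the client append a __name__ order-by clause in the direction of the
// last explicit order-by (desc here) and derive the cursor values
// (7, 8, and the document reference) from the snapshot.
func snapshotCursorWithOrderBy(coll *firestore.CollectionRef, snap *firestore.DocumentSnapshot) firestore.Query {
	return coll.OrderBy("a", firestore.Asc).OrderBy("b", firestore.Desc).StartAfter(snap)
}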

76 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto generated vendored Normal file
@@ -0,0 +1,76 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# If there is an existing orderBy clause on __name__, no changes are made to the
# list of orderBy clauses.

description: "query: cursor method, doc snapshot, existing orderBy __name__"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "a"
      >
      direction: "desc"
    >
  >
  clauses: <
    order_by: <
      path: <
        field: "__name__"
      >
      direction: "asc"
    >
  >
  clauses: <
    start_at: <
      doc_snapshot: <
        path: "projects/projectID/databases/(default)/documents/C/D"
        json_data: "{\"a\": 7, \"b\": 8}"
      >
    >
  >
  clauses: <
    end_at: <
      doc_snapshot: <
        path: "projects/projectID/databases/(default)/documents/C/D"
        json_data: "{\"a\": 7, \"b\": 8}"
      >
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  order_by: <
    field: <
      field_path: "a"
    >
    direction: DESCENDING
  >
  order_by: <
    field: <
      field_path: "__name__"
    >
    direction: ASCENDING
  >
  start_at: <
    values: <
      integer_value: 7
    >
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D"
    >
    before: true
  >
  end_at: <
    values: <
      integer_value: 7
    >
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D"
    >
  >
>
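A matching sketch for this fixture (again a hypothetical helper, not from the diff): an explicit __name__ order-by suppresses the appended clause.

package example

import "cloud.google.com/go/firestore"

// explicitNameOrderBy: the query already orders by __name__
// (firestore.DocumentID), so the client adds no further order-by clauses;
// the snapshot still supplies one cursor value per order-by clause.
func explicitNameOrderBy(coll *firestore.CollectionRef, snap *firestore.DocumentSnapshot) firestore.Query {
	return coll.OrderBy("a", firestore.Desc).
		OrderBy(firestore.DocumentID, firestore.Asc).
		StartAt(snap).EndAt(snap)
}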

53 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto generated vendored Normal file
@@ -0,0 +1,53 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# A Where clause using equality doesn't change the implicit orderBy clauses.

description: "query: cursor methods with a document snapshot and an equality where clause"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    where: <
      path: <
        field: "a"
      >
      op: "=="
      json_value: "3"
    >
  >
  clauses: <
    end_at: <
      doc_snapshot: <
        path: "projects/projectID/databases/(default)/documents/C/D"
        json_data: "{\"a\": 7, \"b\": 8}"
      >
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  where: <
    field_filter: <
      field: <
        field_path: "a"
      >
      op: EQUAL
      value: <
        integer_value: 3
      >
    >
  >
  order_by: <
    field: <
      field_path: "__name__"
    >
    direction: ASCENDING
  >
  end_at: <
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D"
    >
  >
>
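Sketch for this fixture (hypothetical helper): equality filters add nothing to the implicit ordering.

package example

import "cloud.google.com/go/firestore"

// equalityWhere: an equality filter contributes no implicit order-by of its
// own, so only the appended __name__ clause remains, and the snapshot cursor
// carries just the document reference.
func equalityWhere(coll *firestore.CollectionRef, snap *firestore.DocumentSnapshot) firestore.Query {
	return coll.Where("a", "==", 3).EndAt(snap)
}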

72 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto generated vendored Normal file
@@ -0,0 +1,72 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# If there is an OrderBy clause, the inequality Where clause does not result in a
# new OrderBy clause. We still add a __name__ OrderBy clause

description: "query: cursor method, doc snapshot, inequality where clause, and existing orderBy clause"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "a"
      >
      direction: "desc"
    >
  >
  clauses: <
    where: <
      path: <
        field: "a"
      >
      op: "<"
      json_value: "4"
    >
  >
  clauses: <
    start_at: <
      doc_snapshot: <
        path: "projects/projectID/databases/(default)/documents/C/D"
        json_data: "{\"a\": 7, \"b\": 8}"
      >
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  where: <
    field_filter: <
      field: <
        field_path: "a"
      >
      op: LESS_THAN
      value: <
        integer_value: 4
      >
    >
  >
  order_by: <
    field: <
      field_path: "a"
    >
    direction: DESCENDING
  >
  order_by: <
    field: <
      field_path: "__name__"
    >
    direction: DESCENDING
  >
  start_at: <
    values: <
      integer_value: 7
    >
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D"
    >
    before: true
  >
>

64 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto generated vendored Normal file
@@ -0,0 +1,64 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# A Where clause with an inequality results in an OrderBy clause on that clause's
# path, if there are no other OrderBy clauses.

description: "query: cursor method with a document snapshot and an inequality where clause"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    where: <
      path: <
        field: "a"
      >
      op: "<="
      json_value: "3"
    >
  >
  clauses: <
    end_before: <
      doc_snapshot: <
        path: "projects/projectID/databases/(default)/documents/C/D"
        json_data: "{\"a\": 7, \"b\": 8}"
      >
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  where: <
    field_filter: <
      field: <
        field_path: "a"
      >
      op: LESS_THAN_OR_EQUAL
      value: <
        integer_value: 3
      >
    >
  >
  order_by: <
    field: <
      field_path: "a"
    >
    direction: ASCENDING
  >
  order_by: <
    field: <
      field_path: "__name__"
    >
    direction: ASCENDING
  >
  end_at: <
    values: <
      integer_value: 7
    >
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D"
    >
    before: true
  >
>
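Sketch for the two inequality fixtures above (hypothetical helper, not from the diff):

package example

import "cloud.google.com/go/firestore"

// inequalityWhere: with no explicit order-by, an inequality filter makes the
// client order by that field and then __name__, so the snapshot cursor gets a
// value for each (a=7 plus the document reference). With an explicit order-by
// already present, as in the previous fixture, no extra field clause is added.
func inequalityWhere(coll *firestore.CollectionRef, snap *firestore.DocumentSnapshot) firestore.Query {
	return coll.Where("a", "<=", 3).EndBefore(snap)
}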

34 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto generated vendored Normal file
@@ -0,0 +1,34 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# When a document snapshot is used, the client appends a __name__ order-by clause.

description: "query: cursor methods with a document snapshot"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    start_at: <
      doc_snapshot: <
        path: "projects/projectID/databases/(default)/documents/C/D"
        json_data: "{\"a\": 7, \"b\": 8}"
      >
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  order_by: <
    field: <
      field_path: "__name__"
    >
    direction: ASCENDING
  >
  start_at: <
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D"
    >
    before: true
  >
>

16 vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto generated vendored Normal file
@@ -0,0 +1,16 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# If a cursor method with a list of values is provided, there must be at least as
# many explicit orderBy clauses as values.

description: "query: cursor method without orderBy"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    start_at: <
      json_values: "2"
    >
  >
  is_error: true
>
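Sketch of the error case this fixture pins down (hypothetical helper):

package example

import "cloud.google.com/go/firestore"

// cursorWithoutOrderBy: a plain value cursor needs at least as many explicit
// OrderBy clauses as values; with none, converting this query to a proto
// fails, and the error surfaces when the query is executed.
func cursorWithoutOrderBy(coll *firestore.CollectionRef) firestore.Query {
	return coll.StartAt(2) // invalid: no OrderBy clause to bind the value to
}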

50 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto generated vendored Normal file
@@ -0,0 +1,50 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# Cursor methods take the same number of values as there are OrderBy clauses.

description: "query: StartAt/EndBefore with values"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "a"
      >
      direction: "asc"
    >
  >
  clauses: <
    start_at: <
      json_values: "7"
    >
  >
  clauses: <
    end_before: <
      json_values: "9"
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  order_by: <
    field: <
      field_path: "a"
    >
    direction: ASCENDING
  >
  start_at: <
    values: <
      integer_value: 7
    >
    before: true
  >
  end_at: <
    values: <
      integer_value: 9
    >
    before: true
  >
>

48 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto generated vendored Normal file
@@ -0,0 +1,48 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# Cursor methods take the same number of values as there are OrderBy clauses.

description: "query: StartAfter/EndAt with values"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "a"
      >
      direction: "asc"
    >
  >
  clauses: <
    start_after: <
      json_values: "7"
    >
  >
  clauses: <
    end_at: <
      json_values: "9"
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  order_by: <
    field: <
      field_path: "a"
    >
    direction: ASCENDING
  >
  start_at: <
    values: <
      integer_value: 7
    >
  >
  end_at: <
    values: <
      integer_value: 9
    >
  >
>

71 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto generated vendored Normal file
@@ -0,0 +1,71 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# Cursor methods take the same number of values as there are OrderBy clauses.

description: "query: Start/End with two values"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "a"
      >
      direction: "asc"
    >
  >
  clauses: <
    order_by: <
      path: <
        field: "b"
      >
      direction: "desc"
    >
  >
  clauses: <
    start_at: <
      json_values: "7"
      json_values: "8"
    >
  >
  clauses: <
    end_at: <
      json_values: "9"
      json_values: "10"
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  order_by: <
    field: <
      field_path: "a"
    >
    direction: ASCENDING
  >
  order_by: <
    field: <
      field_path: "b"
    >
    direction: DESCENDING
  >
  start_at: <
    values: <
      integer_value: 7
    >
    values: <
      integer_value: 8
    >
    before: true
  >
  end_at: <
    values: <
      integer_value: 9
    >
    values: <
      integer_value: 10
    >
  >
>
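Sketch covering the three value-cursor fixtures above (hypothetical helper):

package example

import "cloud.google.com/go/firestore"

// twoValueCursor: with two OrderBy clauses, StartAt and EndAt each take two
// values, bound to the clauses in order (a=7, b=8 and a=9, b=10 here).
func twoValueCursor(coll *firestore.CollectionRef) firestore.Query {
	return coll.OrderBy("a", firestore.Asc).OrderBy("b", firestore.Desc).
		StartAt(7, 8).EndAt(9, 10)
}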

50 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto generated vendored Normal file
@@ -0,0 +1,50 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# Cursor values corresponding to a __name__ field take the document path relative
# to the query's collection.

description: "query: cursor methods with __name__"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "__name__"
      >
      direction: "asc"
    >
  >
  clauses: <
    start_after: <
      json_values: "\"D1\""
    >
  >
  clauses: <
    end_before: <
      json_values: "\"D2\""
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  order_by: <
    field: <
      field_path: "__name__"
    >
    direction: ASCENDING
  >
  start_at: <
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D1"
    >
  >
  end_at: <
    values: <
      reference_value: "projects/projectID/databases/(default)/documents/C/D2"
    >
    before: true
  >
>
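Sketch for this fixture (hypothetical helper):

package example

import "cloud.google.com/go/firestore"

// docIDCursor: cursor values for __name__ are given as document IDs relative
// to the query's collection; the client expands them to full resource paths
// (.../documents/C/D1 and .../documents/C/D2) in the generated proto.
func docIDCursor(coll *firestore.CollectionRef) firestore.Query {
	return coll.OrderBy(firestore.DocumentID, firestore.Asc).
		StartAfter("D1").EndBefore("D2")
}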

60 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto generated vendored Normal file
@@ -0,0 +1,60 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# When multiple Start* or End* calls occur, the values of the last one are used.

description: "query: cursor methods, last one wins"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "a"
      >
      direction: "asc"
    >
  >
  clauses: <
    start_after: <
      json_values: "1"
    >
  >
  clauses: <
    start_at: <
      json_values: "2"
    >
  >
  clauses: <
    end_at: <
      json_values: "3"
    >
  >
  clauses: <
    end_before: <
      json_values: "4"
    >
  >
>
query: <
  from: <
    collection_id: "C"
  >
  order_by: <
    field: <
      field_path: "a"
    >
    direction: ASCENDING
  >
  start_at: <
    values: <
      integer_value: 2
    >
    before: true
  >
  end_at: <
    values: <
      integer_value: 4
    >
    before: true
  >
>
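Sketch for this fixture (hypothetical helper):

package example

import "cloud.google.com/go/firestore"

// lastCursorWins: repeated Start*/End* calls do not accumulate; only the last
// call on each side takes effect (StartAt(2) and EndBefore(4) here).
func lastCursorWins(coll *firestore.CollectionRef) firestore.Query {
	return coll.OrderBy("a", firestore.Asc).
		StartAfter(1).StartAt(2).
		EndAt(3).EndBefore(4)
}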

23 vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto generated vendored Normal file
@@ -0,0 +1,23 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.

# Sentinel values are not permitted in queries.

description: "query: Delete in cursor method"
query: <
  coll_path: "projects/projectID/databases/(default)/documents/C"
  clauses: <
    order_by: <
      path: <
        field: "a"
      >
      direction: "asc"
    >
  >
  clauses: <
    end_before: <
      json_values: "\"Delete\""
    >
  >
  is_error: true
>
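Sketch of the error case (hypothetical helper; assumes the firestore.Delete sentinel, which the query_test.go changes above also exercise):

package example

import "cloud.google.com/go/firestore"

// sentinelInCursor: sentinel values such as firestore.Delete are rejected in
// cursor positions, so this query reports an error instead of running.
func sentinelInCursor(coll *firestore.CollectionRef) firestore.Query {
	return coll.OrderBy("a", firestore.Asc).EndBefore(firestore.Delete) // error
}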

Some files were not shown because too many files have changed in this diff.