Update dependencies

Alexander Neumann 2017-09-13 14:09:48 +02:00
parent f3b49987f8
commit fda563d606
926 changed files with 189726 additions and 98666 deletions

Gopkg.lock (generated): 74 changed lines

@@ -10,26 +10,26 @@
 [[projects]]
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
-  revision = "44bcd0b2078ba5e7fedbeb36808d1ed893534750"
-  version = "v0.11.0"
+  revision = "5a9e19d4e1e41a734154e44a2132b358afb49a03"
+  version = "v0.13.0"
 [[projects]]
   name = "github.com/Azure/azure-sdk-for-go"
   packages = ["storage"]
-  revision = "2d49bb8f2cee530cc16f1f1a9f0aae763dee257d"
-  version = "v10.2.1-beta"
+  revision = "df4dd90d076ebbf6e87d08d3f00bfac8ff4bde1a"
+  version = "v10.3.1-beta"
 [[projects]]
   name = "github.com/Azure/go-autorest"
   packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
-  revision = "f6e08fe5e4d45c9a66e40196d3fed5f37331d224"
-  version = "v8.1.1"
+  revision = "5432abe734f8d95c78340cd56712f912906e6514"
+  version = "v8.3.1"
 [[projects]]
   name = "github.com/cpuguy83/go-md2man"
   packages = ["md2man"]
-  revision = "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
-  version = "v1.0.6"
+  revision = "1d903dcb749992f3741d744c0f8376b4bd7eb3e1"
+  version = "v1.0.7"
 [[projects]]
   name = "github.com/dgrijalva/jwt-go"
@@ -39,27 +39,33 @@
 [[projects]]
   branch = "master"
+  name = "github.com/dustin/go-humanize"
+  packages = ["."]
+  revision = "79e699ccd02f240a1f1fbbdcee7e64c1c12e41aa"
+
+[[projects]]
   name = "github.com/elithrar/simple-scrypt"
   packages = ["."]
-  revision = "6724715de445c2e70cdafb7a1a14c8cfe0984210"
+  revision = "2325946f714c95de4a6088202c402fbdfa64163b"
+  version = "v1.2.0"
 [[projects]]
   name = "github.com/go-ini/ini"
   packages = ["."]
-  revision = "d3de07a94d22b4a0972deb4b96d790c2c0ce8333"
-  version = "v1.28.0"
+  revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
+  version = "v1.28.2"
 [[projects]]
   branch = "master"
   name = "github.com/golang/protobuf"
   packages = ["proto"]
-  revision = "748d386b5c1ea99658fd69fe9f03991ce86a90c1"
+  revision = "17ce1425424ab154092bbb43af630bd647f3bb0d"
 [[projects]]
-  branch = "master"
   name = "github.com/inconshreveable/mousetrap"
   packages = ["."]
   revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
+  version = "v1.0"
 [[projects]]
   branch = "master"
@@ -70,8 +76,8 @@
 [[projects]]
   name = "github.com/kurin/blazer"
   packages = ["b2","base","internal/b2types","internal/blog"]
-  revision = "612082ed2430716569f1ec816fc6ade849020816"
-  version = "v0.1.0"
+  revision = "1a870c3ee8b83e17d762307c6eae8f390ac3f4a0"
+  version = "v0.1.1"
 [[projects]]
   branch = "master"
@@ -82,14 +88,14 @@
 [[projects]]
   name = "github.com/minio/minio-go"
   packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"]
-  revision = "5ca66c9a35ba1cd674484be99dc97aa0973afe12"
-  version = "v3.0.0"
+  revision = "4e0f567303d4cc90ceb055a451959fb9fc391fb9"
+  version = "3.0.3"
 [[projects]]
   branch = "master"
   name = "github.com/ncw/swift"
   packages = ["."]
-  revision = "5068c3506cf003c630c94b92a64e978115394f26"
+  revision = "9d3f812e23d270d1c66a9a01e20af1005061cdc4"
 [[projects]]
   name = "github.com/pkg/errors"
@@ -104,10 +110,10 @@
   version = "v1.2.1"
 [[projects]]
+  branch = "master"
   name = "github.com/pkg/sftp"
   packages = ["."]
-  revision = "4f3e725e885c021085d2fb8a9cc26e30ea1a992f"
-  version = "1.0.0"
+  revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
 [[projects]]
   name = "github.com/pkg/xattr"
@@ -124,8 +130,8 @@
 [[projects]]
   name = "github.com/russross/blackfriday"
   packages = ["."]
-  revision = "0b647d0506a698cca42caca173e55559b12a69f2"
-  version = "v1.4"
+  revision = "4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c"
+  version = "v1.5"
 [[projects]]
   name = "github.com/satori/uuid"
@@ -134,16 +140,16 @@
   version = "v1.1.0"
 [[projects]]
-  branch = "master"
-  name = "github.com/shurcooL/sanitized_anchor_name"
+  name = "github.com/sirupsen/logrus"
   packages = ["."]
-  revision = "541ff5ee47f1dddf6a5281af78307d921524bcb5"
+  revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
+  version = "v1.0.3"
 [[projects]]
   branch = "master"
   name = "github.com/spf13/cobra"
   packages = [".","doc"]
-  revision = "f20b4e9c32bb3e9d44773ca208db814f24dcd21b"
+  revision = "b78744579491c1ceeaaa3b40205e56b0591b93a3"
 [[projects]]
   name = "github.com/spf13/pflag"
@@ -155,31 +161,31 @@
   branch = "master"
   name = "golang.org/x/crypto"
   packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"]
-  revision = "6914964337150723782436d56b3f21610a74ce7b"
+  revision = "faadfbdc035307d901e69eea569f5dda451a3ee3"
 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
   packages = ["context","context/ctxhttp"]
-  revision = "ab5485076ff3407ad2d02db054635913f017b0ed"
+  revision = "b129b8e0fbeb39c8358e51a07ab6c50ad415e72e"
 [[projects]]
   branch = "master"
   name = "golang.org/x/oauth2"
   packages = [".","google","internal","jws","jwt"]
-  revision = "96fca6c793ec32f068f97942ae3c7c073810dfc1"
+  revision = "13449ad91cb26cb47661c1b080790392170385fd"
 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
-  packages = ["unix"]
-  revision = "c4489faa6e5ab84c0ef40d6ee878f7a030281f0f"
+  packages = ["unix","windows"]
+  revision = "062cd7e4e68206d8bab9b18396626e855c992658"
 [[projects]]
   branch = "master"
   name = "google.golang.org/api"
   packages = ["gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
-  revision = "93a219feb72690ca01348faec80d5a076a32f688"
+  revision = "2fe03ca2dc379c00d654a4459d1a50812cac2848"
 [[projects]]
   name = "google.golang.org/appengine"
@@ -191,11 +197,11 @@
   branch = "v2"
   name = "gopkg.in/yaml.v2"
   packages = ["."]
-  revision = "25c4ec802a7d637f88d584ab26798e94ad14c13b"
+  revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "86ff1296a8516e9135089cc88fd98cbacdc7d9986f7120557b63ab1cd789e00c"
+  inputs-digest = "53e4779dc4c7de2cd8b195f13c215c24da5efc5e33acf584615b5c43bfefd2db"
   solver-name = "gps-cdcl"
   solver-version = 1

vendor/cloud.google.com/go/README.md (generated, vendored): 49 changed lines

@@ -33,6 +33,51 @@ make backwards-incompatible changes.
 ## News

+_September 8, 2017_
+
+*v0.13.0*
+
+- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
+  options to continue using Legacy SQL after the client switches its default
+  to Standard SQL.
+- bigquery: Support for updating dataset labels.
+- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
+  than the client's. DatasetsInProject is no longer needed and is deprecated.
+- bigtable: Fail ListInstances when any zones fail.
+- spanner: support decoding of slices of basic types (e.g. []string, []int64,
+  etc.)
+- logging/logadmin: UpdateSink no longer creates a sink if it is missing
+  (actually a change to the underlying service, not the client)
+- profiler: Service and ServiceVersion replace Target in Config.
+
+_August 22, 2017_
+
+*v0.12.0*
+
+- pubsub: Subscription.Receive now uses streaming pull.
+- pubsub: add Client.TopicInProject to access topics in a different project
+  than the client.
+- errors: renamed errorreporting. The errors package will be removed shortly.
+- datastore: improved retry behavior.
+- bigquery: support updates to dataset metadata, with etags.
+- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
+- bigquery: generate all job IDs on the client.
+- storage: support bucket lifecycle configurations.
+
 _July 31, 2017_

 *v0.11.0*
@@ -88,7 +133,7 @@ Google API | Status | Package
 [Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
 [Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
 [Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
-[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errors`][cloud-errors-ref]
+[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]

 > **Alpha status**: the API is still being actively developed. As a
@@ -455,6 +500,6 @@ for more information.
 [cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
 [cloud-errors]: https://cloud.google.com/error-reporting/
-[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errors
+[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
 [default-creds]: https://developers.google.com/identity/protocols/application-default-credentials

View file

@@ -19,12 +19,14 @@ import (
 	"cloud.google.com/go/internal/testutil"
+	"github.com/google/go-cmp/cmp/cmpopts"
 	"golang.org/x/net/context"
 	bq "google.golang.org/api/bigquery/v2"
 )

 func defaultCopyJob() *bq.Job {
 	return &bq.Job{
+		JobReference: &bq.JobReference{ProjectId: "client-project-id"},
 		Configuration: &bq.JobConfiguration{
 			Copy: &bq.JobConfigurationTableCopy{
 				DestinationTable: &bq.TableReference{
@@ -106,16 +108,13 @@ func TestCopy(t *testing.T) {
 			config: CopyConfig{JobID: "job-id"},
 			want: func() *bq.Job {
 				j := defaultCopyJob()
-				j.JobReference = &bq.JobReference{
-					JobId:     "job-id",
-					ProjectId: "client-project-id",
-				}
+				j.JobReference.JobId = "job-id"
 				return j
 			}(),
 		},
 	}
-	for _, tc := range testCases {
+	for i, tc := range testCases {
 		s := &testService{}
 		c := &Client{
 			service: s,
@@ -127,11 +126,24 @@ func TestCopy(t *testing.T) {
 		tc.config.Dst = tc.dst
 		copier.CopyConfig = tc.config
 		if _, err := copier.Run(context.Background()); err != nil {
-			t.Errorf("err calling Run: %v", err)
+			t.Errorf("#%d: err calling Run: %v", i, err)
 			continue
 		}
-		if !testutil.Equal(s.Job, tc.want) {
-			t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
-		}
+		checkJob(t, i, s.Job, tc.want)
 	}
 }

+func checkJob(t *testing.T, i int, got, want *bq.Job) {
+	if got.JobReference == nil {
+		t.Errorf("#%d: empty job reference", i)
+		return
+	}
+	if got.JobReference.JobId == "" {
+		t.Errorf("#%d: empty job ID", i)
+		return
+	}
+	d := testutil.Diff(got, want, cmpopts.IgnoreFields(bq.JobReference{}, "JobId"))
+	if d != "" {
+		t.Errorf("#%d: (got=-, want=+) %s", i, d)
+	}
+}

View file

@@ -108,3 +108,15 @@ func TestCreateTableOptions(t *testing.T) {
 		}
 	}
 }
+
+func TestCreateTableOptionsLegacySQL(t *testing.T) {
+	c := &Client{
+		projectID: "p",
+		service:   &bigqueryService{},
+	}
+	ds := c.Dataset("d")
+	table := ds.Table("t")
+	if err := table.Create(context.Background(), UseStandardSQL(), UseLegacySQL()); err == nil {
+		t.Fatal("no error using both standard and legacy SQL options")
+	}
+}
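The test above checks that the new UseLegacySQL table-creation option conflicts with UseStandardSQL. A minimal usage sketch of creating a view with the legacy dialect; the client, ctx, and the dataset/view names are assumptions, and the pattern mirrors TestIntegration_TableUseLegacySQL further below:

	// Sketch only: client is an authenticated *bigquery.Client, ctx a context.Context.
	vq := bigquery.ViewQuery("SELECT word FROM [bigquery-public-data:samples.shakespeare] LIMIT 10")
	view := client.Dataset("my_dataset").Table("my_legacy_view")
	// Bracketed table names ([project:dataset.table]) only parse under legacy SQL.
	if err := view.Create(ctx, vq, bigquery.UseLegacySQL()); err != nil {
		// TODO: Handle error.
	}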

View file

@@ -17,6 +17,8 @@ package bigquery
 import (
 	"time"

+	"cloud.google.com/go/internal/optional"
+
 	"golang.org/x/net/context"
 	"google.golang.org/api/iterator"
 )
@@ -30,16 +32,49 @@ type Dataset struct {
 type DatasetMetadata struct {
 	CreationTime           time.Time
 	LastModifiedTime       time.Time     // When the dataset or any of its tables were modified.
-	DefaultTableExpiration time.Duration
-	Description            string // The user-friendly description of this table.
-	Name                   string // The user-friendly name for this table.
+	DefaultTableExpiration time.Duration // The default expiration time for new tables.
+	Description            string        // The user-friendly description of this dataset.
+	Name                   string        // The user-friendly name for this dataset.
 	ID                     string
 	Location               string            // The geo location of the dataset.
 	Labels                 map[string]string // User-provided labels.
+	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
+	// ensure that the metadata hasn't changed since it was read.
+	ETag string
 	// TODO(jba): access rules
 }

+type DatasetMetadataToUpdate struct {
+	Description optional.String // The user-friendly description of this table.
+	Name        optional.String // The user-friendly name for this dataset.
+
+	// DefaultTableExpiration is the default expiration time for new tables.
+	// If set to time.Duration(0), new tables never expire.
+	DefaultTableExpiration optional.Duration
+
+	setLabels    map[string]string
+	deleteLabels map[string]bool
+}
+
+// SetLabel causes a label to be added or modified when dm is used
+// in a call to Dataset.Update.
+func (dm *DatasetMetadataToUpdate) SetLabel(name, value string) {
+	if dm.setLabels == nil {
+		dm.setLabels = map[string]string{}
+	}
+	dm.setLabels[name] = value
+}
+
+// DeleteLabel causes a label to be deleted when dm is used in a
+// call to Dataset.Update.
+func (dm *DatasetMetadataToUpdate) DeleteLabel(name string) {
+	if dm.deleteLabels == nil {
+		dm.deleteLabels = map[string]bool{}
+	}
+	dm.deleteLabels[name] = true
+}
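A hedged usage sketch of the new DatasetMetadataToUpdate API, combining label changes and the default table expiration in one blind write; the project, dataset, and label names are illustrative, in the style of the package's Example functions:

	// Sketch only, not part of the upstream change.
	func ExampleDataset_Update_labels_sketch() {
		ctx := context.Background()
		client, err := bigquery.NewClient(ctx, "project-id")
		if err != nil {
			// TODO: Handle error.
		}
		var dm bigquery.DatasetMetadataToUpdate
		dm.SetLabel("env", "staging")               // add or modify a label
		dm.DeleteLabel("obsolete")                  // remove another label
		dm.DefaultTableExpiration = 24 * time.Hour  // new tables expire after a day
		md, err := client.Dataset("my_dataset").Update(ctx, dm, "") // empty etag: blind write
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(md.Labels, md.DefaultTableExpiration)
	}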
 // Dataset creates a handle to a BigQuery dataset in the client's project.
 func (c *Client) Dataset(id string) *Dataset {
 	return c.DatasetInProject(c.projectID, id)
@@ -70,6 +105,14 @@ func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
 	return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
 }

+// Update modifies specific Dataset metadata fields.
+// To perform a read-modify-write that protects against intervening reads,
+// set the etag argument to the DatasetMetadata.ETag field from the read.
+// Pass the empty string for etag for a "blind write" that will always succeed.
+func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
+	return d.c.service.patchDataset(ctx, d.ProjectID, d.DatasetID, &dm, etag)
+}
+
 // Table creates a handle to a BigQuery table in the dataset.
 // To determine if a table exists, call Table.Metadata.
 // If the table does not already exist, use Table.Create to create it.
@@ -126,17 +169,21 @@ func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
 	return tok, nil
 }

-// Datasets returns an iterator over the datasets in the Client's project.
+// Datasets returns an iterator over the datasets in a project.
+// The Client's project is used by default, but that can be
+// changed by setting ProjectID on the returned iterator before calling Next.
 func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
 	return c.DatasetsInProject(ctx, c.projectID)
 }

 // DatasetsInProject returns an iterator over the datasets in the provided project.
+//
+// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
 func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
 	it := &DatasetIterator{
 		ctx: ctx,
 		c:   c,
-		projectID: projectID,
+		ProjectID: projectID,
 	}
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
 		it.fetch,
@@ -148,18 +195,23 @@ func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
 // DatasetIterator iterates over the datasets in a project.
 type DatasetIterator struct {
 	// ListHidden causes hidden datasets to be listed when set to true.
+	// Set before the first call to Next.
 	ListHidden bool

 	// Filter restricts the datasets returned by label. The filter syntax is described in
 	// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
+	// Set before the first call to Next.
 	Filter string

-	ctx       context.Context
-	projectID string
-	c         *Client
-	pageInfo  *iterator.PageInfo
-	nextFunc  func() error
-	items     []*Dataset
+	// The project ID of the listed datasets.
+	// Set before the first call to Next.
+	ProjectID string
+
+	ctx      context.Context
+	c        *Client
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+	items    []*Dataset
 }

 // PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
@@ -175,7 +227,7 @@ func (it *DatasetIterator) Next() (*Dataset, error) {
 }

 func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
-	datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.projectID,
+	datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.ProjectID,
 		pageSize, pageToken, it.ListHidden, it.Filter)
 	if err != nil {
 		return "", err

View file

@ -262,6 +262,44 @@ func ExampleDataset_Metadata() {
fmt.Println(md) fmt.Println(md)
} }
// This example illustrates how to perform a read-modify-write sequence on dataset
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleDataset_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
md, err := ds.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := ds.Update(ctx,
bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleDataset_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
func ExampleDataset_Table() { func ExampleDataset_Table() {
ctx := context.Background() ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id") client, err := bigquery.NewClient(ctx, "project-id")
@ -356,6 +394,7 @@ func ExampleTable_Create() {
} }
} }
// If you know your table's schema initially, pass a Schema to Create.
func ExampleTable_Create_schema() { func ExampleTable_Create_schema() {
ctx := context.Background() ctx := context.Background()
// Infer table schema from a Go type. // Infer table schema from a Go type.
@ -476,6 +515,8 @@ func ExampleTable_LoaderFrom() {
} }
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true gcsRef.AllowJaggedRows = true
gcsRef.MaxBadRecords = 5
gcsRef.Schema = schema
// TODO: set other options on the GCSReference. // TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset") ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(gcsRef) loader := ds.Table("my_table").LoaderFrom(gcsRef)
@ -506,6 +547,8 @@ func ExampleTable_LoaderFrom_reader() {
} }
rs := bigquery.NewReaderSource(f) rs := bigquery.NewReaderSource(f)
rs.AllowJaggedRows = true rs.AllowJaggedRows = true
rs.MaxBadRecords = 5
rs.Schema = schema
// TODO: set other options on the GCSReference. // TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset") ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(rs) loader := ds.Table("my_table").LoaderFrom(rs)
@@ -534,7 +577,32 @@ func ExampleTable_Read() {
 	_ = it // TODO: iterate using Next or iterator.Pager.
 }

-func ExampleTable_Update() {
+// This example illustrates how to perform a read-modify-write sequence on table
+// metadata. Passing the metadata's ETag to the Update call ensures that the call
+// will fail if the metadata was changed since the read.
+func ExampleTable_Update_readModifyWrite() {
+	ctx := context.Background()
+	client, err := bigquery.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	t := client.Dataset("my_dataset").Table("my_table")
+	md, err := t.Metadata(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	md2, err := t.Update(ctx,
+		bigquery.TableMetadataToUpdate{Name: "new " + md.Name},
+		md.ETag)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Println(md2)
+}
+
+// To perform a blind write, ignoring the existing state (and possibly overwriting
+// other updates), pass the empty string as the etag.
+func ExampleTable_Update_blindWrite() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
 	if err != nil {
@@ -543,7 +611,7 @@ func ExampleTable_Update() {
 	t := client.Dataset("my_dataset").Table("my_table")
 	tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
 		Description: "my favorite table",
-	})
+	}, "")
 	if err != nil {
 		// TODO: Handle error.
 	}

View file

@ -17,8 +17,6 @@ package bigquery
import ( import (
"testing" "testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context" "golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
@ -26,10 +24,11 @@ import (
func defaultExtractJob() *bq.Job { func defaultExtractJob() *bq.Job {
return &bq.Job{ return &bq.Job{
JobReference: &bq.JobReference{ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{ Configuration: &bq.JobConfiguration{
Extract: &bq.JobConfigurationExtract{ Extract: &bq.JobConfigurationExtract{
SourceTable: &bq.TableReference{ SourceTable: &bq.TableReference{
ProjectId: "project-id", ProjectId: "client-project-id",
DatasetId: "dataset-id", DatasetId: "dataset-id",
TableId: "table-id", TableId: "table-id",
}, },
@ -43,7 +42,7 @@ func TestExtract(t *testing.T) {
s := &testService{} s := &testService{}
c := &Client{ c := &Client{
service: s, service: s,
projectID: "project-id", projectID: "client-project-id",
} }
testCases := []struct { testCases := []struct {
@ -87,17 +86,15 @@ func TestExtract(t *testing.T) {
}, },
} }
for _, tc := range testCases { for i, tc := range testCases {
ext := tc.src.ExtractorTo(tc.dst) ext := tc.src.ExtractorTo(tc.dst)
tc.config.Src = ext.Src tc.config.Src = ext.Src
tc.config.Dst = ext.Dst tc.config.Dst = ext.Dst
ext.ExtractConfig = tc.config ext.ExtractConfig = tc.config
if _, err := ext.Run(context.Background()); err != nil { if _, err := ext.Run(context.Background()); err != nil {
t.Errorf("err calling extract: %v", err) t.Errorf("#%d: err calling extract: %v", i, err)
continue continue
} }
if !testutil.Equal(s.Job, tc.want) { checkJob(t, i, s.Job, tc.want)
t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
} }
} }

View file

@@ -43,7 +43,10 @@ var (
 	dataset *Dataset
 	schema  = Schema{
 		{Name: "name", Type: StringFieldType},
-		{Name: "num", Type: IntegerFieldType},
+		{Name: "nums", Type: IntegerFieldType, Repeated: true},
+		{Name: "rec", Type: RecordFieldType, Schema: Schema{
+			{Name: "bool", Type: BooleanFieldType},
+		}},
 	}
 	testTableExpiration time.Time
 )
@ -218,6 +221,119 @@ func TestIntegration_DatasetDelete(t *testing.T) {
} }
} }
func TestIntegration_DatasetUpdateETags(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
check := func(md *DatasetMetadata, wantDesc, wantName string) {
if md.Description != wantDesc {
t.Errorf("description: got %q, want %q", md.Description, wantDesc)
}
if md.Name != wantName {
t.Errorf("name: got %q, want %q", md.Name, wantName)
}
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
if md.ETag == "" {
t.Fatal("empty ETag")
}
// Write without ETag succeeds.
desc := md.Description + "d2"
name := md.Name + "n2"
md2, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: desc, Name: name}, "")
if err != nil {
t.Fatal(err)
}
check(md2, desc, name)
// Write with original ETag fails because of intervening write.
_, err = dataset.Update(ctx, DatasetMetadataToUpdate{Description: "d", Name: "n"}, md.ETag)
if err == nil {
t.Fatal("got nil, want error")
}
// Write with most recent ETag succeeds.
md3, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: "", Name: ""}, md2.ETag)
if err != nil {
t.Fatal(err)
}
check(md3, "", "")
}
func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
// Set the default expiration time.
md, err = dataset.Update(ctx,
DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != time.Hour {
t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
}
// Omitting DefaultTableExpiration doesn't change it.
md, err = dataset.Update(ctx, DatasetMetadataToUpdate{Name: "xyz"}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != time.Hour {
t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
}
// Setting it to 0 deletes it (which looks like a 0 duration).
md, err = dataset.Update(ctx,
DatasetMetadataToUpdate{DefaultTableExpiration: time.Duration(0)}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != 0 {
t.Fatalf("got %s, want 0", md.DefaultTableExpiration)
}
}
func TestIntegration_DatasetUpdateLabels(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
// TODO(jba): use a separate dataset for each test run so
// tests don't interfere with each other.
var dm DatasetMetadataToUpdate
dm.SetLabel("label", "value")
md, err = dataset.Update(ctx, dm, "")
if err != nil {
t.Fatal(err)
}
if got, want := md.Labels["label"], "value"; got != want {
t.Errorf("got %q, want %q", got, want)
}
dm = DatasetMetadataToUpdate{}
dm.DeleteLabel("label")
md, err = dataset.Update(ctx, dm, "")
if err != nil {
t.Fatal(err)
}
if _, ok := md.Labels["label"]; ok {
t.Error("label still present after deletion")
}
}
func TestIntegration_Tables(t *testing.T) { func TestIntegration_Tables(t *testing.T) {
if client == nil { if client == nil {
t.Skip("Integration tests skipped") t.Skip("Integration tests skipped")
@@ -274,7 +390,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 		saverRows []*ValuesSaver
 	)
 	for i, name := range []string{"a", "b", "c"} {
-		row := []Value{name, int64(i)}
+		row := []Value{name, []Value{int64(i)}, []Value{true}}
 		wantRows = append(wantRows, row)
 		saverRows = append(saverRows, &ValuesSaver{
 			Schema: schema,
@@ -296,7 +412,8 @@
 	checkRead(t, "upload", table.Read(ctx), wantRows)

 	// Query the table.
-	q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
+	q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID))
+	q.UseStandardSQL = true
 	q.DefaultProjectID = dataset.ProjectID
 	q.DefaultDatasetID = dataset.DatasetID
@@ -361,9 +478,11 @@
 	if got, want := len(vm), len(vl); got != want {
 		t.Fatalf("valueMap len: got %d, want %d", got, want)
 	}
+	// With maps, structs become nested maps.
+	vl[2] = map[string]Value{"bool": vl[2].([]Value)[0]}
 	for i, v := range vl {
-		if got, want := vm[schema[i].Name], v; got != want {
-			t.Errorf("%d, name=%s: got %v, want %v",
+		if got, want := vm[schema[i].Name], v; !testutil.Equal(got, want) {
+			t.Errorf("%d, name=%s: got %#v, want %#v",
 				i, schema[i].Name, got, want)
 		}
 	}
@ -520,7 +639,7 @@ func (b byName) Len() int { return len(b) }
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name } func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
func TestIntegration_Update(t *testing.T) { func TestIntegration_TableUpdate(t *testing.T) {
if client == nil { if client == nil {
t.Skip("Integration tests skipped") t.Skip("Integration tests skipped")
} }
@ -535,10 +654,12 @@ func TestIntegration_Update(t *testing.T) {
} }
wantDescription := tm.Description + "more" wantDescription := tm.Description + "more"
wantName := tm.Name + "more" wantName := tm.Name + "more"
wantExpiration := tm.ExpirationTime.Add(time.Hour * 24)
got, err := table.Update(ctx, TableMetadataToUpdate{ got, err := table.Update(ctx, TableMetadataToUpdate{
Description: wantDescription, Description: wantDescription,
Name: wantName, Name: wantName,
}) ExpirationTime: wantExpiration,
}, tm.ETag)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -548,10 +669,24 @@ func TestIntegration_Update(t *testing.T) {
if got.Name != wantName { if got.Name != wantName {
t.Errorf("Name: got %q, want %q", got.Name, wantName) t.Errorf("Name: got %q, want %q", got.Name, wantName)
} }
if got.ExpirationTime != wantExpiration {
t.Errorf("ExpirationTime: got %q, want %q", got.ExpirationTime, wantExpiration)
}
if !testutil.Equal(got.Schema, schema) { if !testutil.Equal(got.Schema, schema) {
t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema)) t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema))
} }
// Blind write succeeds.
_, err = table.Update(ctx, TableMetadataToUpdate{Name: "x"}, "")
if err != nil {
t.Fatal(err)
}
// Write with old etag fails.
_, err = table.Update(ctx, TableMetadataToUpdate{Name: "y"}, got.ETag)
if err == nil {
t.Fatal("Update with old ETag succeeded, wanted failure")
}
// Test schema update. // Test schema update.
// Columns can be added. schema2 is the same as schema, except for the // Columns can be added. schema2 is the same as schema, except for the
// added column in the middle. // added column in the middle.
@ -561,24 +696,25 @@ func TestIntegration_Update(t *testing.T) {
} }
schema2 := Schema{ schema2 := Schema{
schema[0], schema[0],
{Name: "rec", Type: RecordFieldType, Schema: nested}, {Name: "rec2", Type: RecordFieldType, Schema: nested},
schema[1], schema[1],
schema[2],
} }
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}) got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}, "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Wherever you add the column, it appears at the end. // Wherever you add the column, it appears at the end.
schema3 := Schema{schema2[0], schema2[2], schema2[1]} schema3 := Schema{schema2[0], schema2[2], schema2[3], schema2[1]}
if !testutil.Equal(got.Schema, schema3) { if !testutil.Equal(got.Schema, schema3) {
t.Errorf("add field:\ngot %v\nwant %v", t.Errorf("add field:\ngot %v\nwant %v",
pretty.Value(got.Schema), pretty.Value(schema3)) pretty.Value(got.Schema), pretty.Value(schema3))
} }
// Updating with the empty schema succeeds, but is a no-op. // Updating with the empty schema succeeds, but is a no-op.
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}) got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}, "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -587,30 +723,31 @@ func TestIntegration_Update(t *testing.T) {
pretty.Value(got.Schema), pretty.Value(schema3)) pretty.Value(got.Schema), pretty.Value(schema3))
} }
// Error cases. // Error cases when updating schema.
for _, test := range []struct { for _, test := range []struct {
desc string desc string
fields []*FieldSchema fields []*FieldSchema
}{ }{
{"change from optional to required", []*FieldSchema{ {"change from optional to required", []*FieldSchema{
schema3[0], {Name: "name", Type: StringFieldType, Required: true},
{Name: "num", Type: IntegerFieldType, Required: true}, schema3[1],
schema3[2], schema3[2],
schema3[3],
}}, }},
{"add a required field", []*FieldSchema{ {"add a required field", []*FieldSchema{
schema3[0], schema3[1], schema3[2], schema3[0], schema3[1], schema3[2], schema3[3],
{Name: "req", Type: StringFieldType, Required: true}, {Name: "req", Type: StringFieldType, Required: true},
}}, }},
{"remove a field", []*FieldSchema{schema3[0], schema3[1]}}, {"remove a field", []*FieldSchema{schema3[0], schema3[1], schema3[2]}},
{"remove a nested field", []*FieldSchema{ {"remove a nested field", []*FieldSchema{
schema3[0], schema3[1], schema3[0], schema3[1], schema3[2],
{Name: "rec", Type: RecordFieldType, Schema: Schema{nested[0]}}}}, {Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
{"remove all nested fields", []*FieldSchema{ {"remove all nested fields", []*FieldSchema{
schema3[0], schema3[1], schema3[0], schema3[1], schema3[2],
{Name: "rec", Type: RecordFieldType, Schema: Schema{}}}}, {Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}},
} { } {
for { for {
_, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}) _, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}, "")
if !hasStatusCode(err, 403) { if !hasStatusCode(err, 403) {
break break
} }
@ -631,7 +768,11 @@ func TestIntegration_Load(t *testing.T) {
t.Skip("Integration tests skipped") t.Skip("Integration tests skipped")
} }
ctx := context.Background() ctx := context.Background()
table := newTable(t, schema) // CSV data can't be loaded into a repeated field, so we use a different schema.
table := newTable(t, Schema{
{Name: "name", Type: StringFieldType},
{Name: "nums", Type: IntegerFieldType},
})
defer table.Delete(ctx) defer table.Delete(ctx)
// Load the table from a reader. // Load the table from a reader.
@ -666,20 +807,22 @@ func TestIntegration_DML(t *testing.T) {
// Use DML to insert. // Use DML to insert.
wantRows := [][]Value{ wantRows := [][]Value{
[]Value{"a", int64(0)}, []Value{"a", []Value{int64(0)}, []Value{true}},
[]Value{"b", int64(1)}, []Value{"b", []Value{int64(1)}, []Value{false}},
[]Value{"c", int64(2)}, []Value{"c", []Value{int64(2)}, []Value{true}},
} }
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+ query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, nums, rec) "+
"VALUES ('a', 0), ('b', 1), ('c', 2)", "VALUES ('a', [0], STRUCT<BOOL>(TRUE)), ('b', [1], STRUCT<BOOL>(FALSE)), ('c', [2], STRUCT<BOOL>(TRUE))",
table.TableID) table.TableID)
q := client.Query(query) q := client.Query(query)
q.UseStandardSQL = true // necessary for DML q.UseStandardSQL = true // necessary for DML
job, err := q.Run(ctx) job, err := q.Run(ctx)
if err != nil { if err != nil {
fmt.Printf("q.Run: %v\n", err)
return false, err return false, err
} }
if err := wait(ctx, job); err != nil { if err := wait(ctx, job); err != nil {
fmt.Printf("wait: %v\n", err)
return false, err return false, err
} }
if msg, ok := compareRead(table.Read(ctx), wantRows); !ok { if msg, ok := compareRead(table.Read(ctx), wantRows); !ok {
@@ -890,7 +1033,7 @@ func TestIntegration_ReadNullIntoStruct(t *testing.T) {
 	upl := table.Uploader()
 	row := &ValuesSaver{
 		Schema: schema,
-		Row:    []Value{"name", nil},
+		Row:    []Value{nil, []Value{}, []Value{nil}},
 	}
 	if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil {
 		t.Fatal(putError(err))
@@ -899,20 +1042,91 @@
 		t.Fatal(err)
 	}

-	q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
+	q := client.Query(fmt.Sprintf("select name from %s", table.TableID))
 	q.DefaultProjectID = dataset.ProjectID
 	q.DefaultDatasetID = dataset.DatasetID
 	it, err := q.Read(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
-	type S struct{ Num int64 }
+	type S struct{ Name string }
 	var s S
 	if err := it.Next(&s); err == nil {
 		t.Fatal("got nil, want error")
 	}
 }
const (
stdName = "`bigquery-public-data.samples.shakespeare`"
legacyName = "[bigquery-public-data:samples.shakespeare]"
)
// These tests exploit the fact that the two SQL versions have different syntaxes for
// fully-qualified table names.
var useLegacySqlTests = []struct {
t string // name of table
std, legacy bool // use standard/legacy SQL
err bool // do we expect an error?
}{
{t: legacyName, std: false, legacy: true, err: false},
{t: legacyName, std: true, legacy: false, err: true},
{t: legacyName, std: false, legacy: false, err: false}, // legacy SQL is default
{t: legacyName, std: true, legacy: true, err: true},
{t: stdName, std: false, legacy: true, err: true},
{t: stdName, std: true, legacy: false, err: false},
{t: stdName, std: false, legacy: false, err: true}, // legacy SQL is default
{t: stdName, std: true, legacy: true, err: true},
}
func TestIntegration_QueryUseLegacySQL(t *testing.T) {
// Test the UseLegacySQL and UseStandardSQL options for queries.
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
for _, test := range useLegacySqlTests {
q := client.Query(fmt.Sprintf("select word from %s limit 1", test.t))
q.UseStandardSQL = test.std
q.UseLegacySQL = test.legacy
_, err := q.Read(ctx)
gotErr := err != nil
if gotErr && !test.err {
t.Errorf("%+v:\nunexpected error: %v", test, err)
} else if !gotErr && test.err {
t.Errorf("%+v:\nsucceeded, but want error", test)
}
}
}
func TestIntegration_TableUseLegacySQL(t *testing.T) {
// Test the UseLegacySQL and UseStandardSQL options for CreateTable.
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
table := newTable(t, schema)
defer table.Delete(ctx)
for i, test := range useLegacySqlTests {
view := dataset.Table(fmt.Sprintf("t_view_%d", i))
vq := ViewQuery(fmt.Sprintf("SELECT word from %s", test.t))
opts := []CreateTableOption{vq}
if test.std {
opts = append(opts, UseStandardSQL())
}
if test.legacy {
opts = append(opts, UseLegacySQL())
}
err := view.Create(ctx, opts...)
gotErr := err != nil
if gotErr && !test.err {
t.Errorf("%+v:\nunexpected error: %v", test, err)
} else if !gotErr && test.err {
t.Errorf("%+v:\nsucceeded, but want error", test)
}
view.Delete(ctx)
}
}
// Creates a new, temporary table with a unique name and the given schema. // Creates a new, temporary table with a unique name and the given schema.
func newTable(t *testing.T, s Schema) *Table { func newTable(t *testing.T, s Schema) *Table {
name := fmt.Sprintf("t%d", time.Now().UnixNano()) name := fmt.Sprintf("t%d", time.Now().UnixNano())
@ -943,7 +1157,7 @@ func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) {
gotRow := []Value(r) gotRow := []Value(r)
wantRow := want[i] wantRow := want[i]
if !testutil.Equal(gotRow, wantRow) { if !testutil.Equal(gotRow, wantRow) {
return fmt.Sprintf("#%d: got %v, want %v", i, gotRow, wantRow), false return fmt.Sprintf("#%d: got %#v, want %#v", i, gotRow, wantRow), false
} }
} }
return "", true return "", true

View file

@@ -70,7 +70,7 @@ type RowIterator struct {
 //
 // If dst is a *map[string]Value, a new map will be created if dst is nil. Then
 // for each schema column name, the map key of that name will be set to the column's
-// value.
+// value. STRUCT types (RECORD types or nested schemas) become nested maps.
 //
 // If dst is pointer to a struct, each column in the schema will be matched
 // with an exported field of the struct that has the same name, ignoring case.
@@ -89,8 +89,8 @@ type RowIterator struct {
 //   TIME      civil.Time
 //   DATETIME  civil.DateTime
 //
-// A repeated field corresponds to a slice or array of the element type.
-// A RECORD type (nested schema) corresponds to a nested struct or struct pointer.
+// A repeated field corresponds to a slice or array of the element type. A STRUCT
+// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
 // All calls to Next on the same iterator must use the same struct type.
 //
 // It is an error to attempt to read a BigQuery NULL value into a struct field.
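A hedged sketch of the documented map behavior above: RECORD columns arrive as nested maps. The iterator `it` is assumed to come from a query over the repeated/record schema used in the integration tests:

	// Sketch only: it is a *bigquery.RowIterator over the "name, nums, rec" schema.
	var row map[string]bigquery.Value
	if err := it.Next(&row); err != nil {
		// TODO: Handle error (including iterator.Done).
	}
	rec, ok := row["rec"].(map[string]bigquery.Value) // RECORD column becomes a nested map
	if ok {
		fmt.Println(row["name"], row["nums"], rec["bool"])
	}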

View file

@@ -16,6 +16,9 @@ package bigquery
 import (
 	"errors"
+	"math/rand"
+	"os"
+	"sync"
 	"time"

 	"cloud.google.com/go/internal"
@@ -46,6 +49,7 @@ func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
 	return job, nil
 }

+// ID returns the job's ID.
 func (j *Job) ID() string {
 	return j.jobID
 }
@@ -77,17 +81,35 @@ type JobStatus struct {
 // projectID must be non-empty.
 func setJobRef(job *bq.Job, jobID, projectID string) {
 	if jobID == "" {
-		return
+		// Generate an ID on the client so that insertJob can be idempotent.
+		jobID = randomJobID()
 	}
 	// We don't check whether projectID is empty; the server will return an
 	// error when it encounters the resulting JobReference.
 	job.JobReference = &bq.JobReference{
 		JobId:     jobID,
 		ProjectId: projectID,
 	}
 }

+const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var (
+	rngMu sync.Mutex
+	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
+)
+
+func randomJobID() string {
+	// As of August 2017, the BigQuery service uses 27 alphanumeric characters.
+	var b [27]byte
+	rngMu.Lock()
+	for i := 0; i < len(b); i++ {
+		b[i] = alphanum[rng.Intn(len(alphanum))]
+	}
+	rngMu.Unlock()
+	return string(b[:])
+}
+
 // Done reports whether the job has completed.
 // After Done returns true, the Err method will return an error if the job completed unsuccesfully.
 func (s *JobStatus) Done() bool {
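Not part of the diff: a sketch of why a client-generated job ID makes insertJob safe to retry, using the raw bigquery/v2 API. The cfg variable, the retry loop, and the 409-on-duplicate assumption are illustrative only; hasStatusCode is the helper used by the integration tests:

	// Sketch only: s is a *bq.Service, cfg a prepared *bq.JobConfiguration.
	job := &bq.Job{
		JobReference:  &bq.JobReference{JobId: randomJobID(), ProjectId: "project-id"},
		Configuration: cfg,
	}
	for attempt := 0; attempt < 3; attempt++ {
		_, err := s.Jobs.Insert("project-id", job).Do()
		if err == nil {
			break // inserted
		}
		if hasStatusCode(err, 409) {
			break // assumed: an earlier attempt already created this job ID
		}
		// otherwise retry (a real implementation would back off)
	}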

View file

@@ -56,6 +56,8 @@ type LoadSource interface {
 // LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
 // The returned Loader may optionally be further configured before its Run method is called.
+// See GCSReference and ReaderSource for additional configuration options that
+// affect loading.
 func (t *Table) LoaderFrom(src LoadSource) *Loader {
 	return &Loader{
 		c: t.c,

View file

@ -20,17 +20,16 @@ import (
"golang.org/x/net/context" "golang.org/x/net/context"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2" bq "google.golang.org/api/bigquery/v2"
) )
func defaultLoadJob() *bq.Job { func defaultLoadJob() *bq.Job {
return &bq.Job{ return &bq.Job{
JobReference: &bq.JobReference{ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{ Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{ Load: &bq.JobConfigurationLoad{
DestinationTable: &bq.TableReference{ DestinationTable: &bq.TableReference{
ProjectId: "project-id", ProjectId: "client-project-id",
DatasetId: "dataset-id", DatasetId: "dataset-id",
TableId: "table-id", TableId: "table-id",
}, },
@ -68,7 +67,7 @@ func bqNestedFieldSchema() *bq.TableFieldSchema {
} }
func TestLoad(t *testing.T) { func TestLoad(t *testing.T) {
c := &Client{projectID: "project-id"} c := &Client{projectID: "client-project-id"}
testCases := []struct { testCases := []struct {
dst *Table dst *Table
@ -95,7 +94,7 @@ func TestLoad(t *testing.T) {
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
j.JobReference = &bq.JobReference{ j.JobReference = &bq.JobReference{
JobId: "ajob", JobId: "ajob",
ProjectId: "project-id", ProjectId: "client-project-id",
} }
return j return j
}(), }(),
@ -218,12 +217,9 @@ func TestLoad(t *testing.T) {
tc.config.Dst = tc.dst tc.config.Dst = tc.dst
loader.LoadConfig = tc.config loader.LoadConfig = tc.config
if _, err := loader.Run(context.Background()); err != nil { if _, err := loader.Run(context.Background()); err != nil {
t.Errorf("%d: err calling Loader.Run: %v", i, err) t.Errorf("#%d: err calling Loader.Run: %v", i, err)
continue continue
} }
if !testutil.Equal(s.Job, tc.want) { checkJob(t, i, s.Job, tc.want)
t.Errorf("loading %d: got:\n%v\nwant:\n%v",
i, pretty.Value(s.Job), pretty.Value(tc.want))
}
} }
} }

View file

@@ -15,6 +15,8 @@
 package bigquery

 import (
+	"errors"
+
 	"golang.org/x/net/context"
 	bq "google.golang.org/api/bigquery/v2"
 )
@@ -89,6 +91,9 @@ type QueryConfig struct {
 	// The default is false (using legacy SQL).
 	UseStandardSQL bool

+	// UseLegacySQL causes the query to use legacy SQL.
+	UseLegacySQL bool
+
 	// Parameters is a list of query parameters. The presence of parameters
 	// implies the use of standard SQL.
 	// If the query uses positional syntax ("?"), then no parameter may have a name.
@@ -177,11 +182,19 @@ func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error {
 	if q.MaxBytesBilled >= 1 {
 		conf.MaximumBytesBilled = q.MaxBytesBilled
 	}
+	if q.UseStandardSQL && q.UseLegacySQL {
+		return errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
+	}
+	if len(q.Parameters) > 0 && q.UseLegacySQL {
+		return errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
+	}
 	if q.UseStandardSQL || len(q.Parameters) > 0 {
 		conf.UseLegacySql = false
 		conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
 	}
+	if q.UseLegacySQL {
+		conf.UseLegacySql = true
+	}
 	if q.Dst != nil && !q.Dst.implicitTable() {
 		conf.DestinationTable = q.Dst.tableRefProto()
 	}
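A short usage sketch of the new UseLegacySQL query option; the client and ctx are assumptions, and the bracketed table name is the legacy-syntax form used by the integration tests:

	// Sketch only: client is a *bigquery.Client, ctx a context.Context.
	q := client.Query("SELECT word FROM [bigquery-public-data:samples.shakespeare] LIMIT 1")
	q.UseLegacySQL = true // bracketed table names require the legacy dialect
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.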

View file

@ -15,6 +15,7 @@
package bigquery package bigquery
import ( import (
"fmt"
"testing" "testing"
"cloud.google.com/go/internal/testutil" "cloud.google.com/go/internal/testutil"
@ -26,10 +27,11 @@ import (
func defaultQueryJob() *bq.Job { func defaultQueryJob() *bq.Job {
return &bq.Job{ return &bq.Job{
JobReference: &bq.JobReference{ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{ Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{ Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{ DestinationTable: &bq.TableReference{
ProjectId: "project-id", ProjectId: "client-project-id",
DatasetId: "dataset-id", DatasetId: "dataset-id",
TableId: "table-id", TableId: "table-id",
}, },
@ -45,7 +47,7 @@ func defaultQueryJob() *bq.Job {
func TestQuery(t *testing.T) { func TestQuery(t *testing.T) {
c := &Client{ c := &Client{
projectID: "project-id", projectID: "client-project-id",
} }
testCases := []struct { testCases := []struct {
dst *Table dst *Table
@ -144,6 +146,7 @@ func TestQuery(t *testing.T) {
}, },
want: func() *bq.Job { want: func() *bq.Job {
j := defaultQueryJob() j := defaultQueryJob()
j.Configuration.Query.DestinationTable.ProjectId = "project-id"
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE" j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Query.CreateDisposition = "CREATE_NEVER" j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
return j return j
@ -251,19 +254,17 @@ func TestQuery(t *testing.T) {
}(), }(),
}, },
} }
for _, tc := range testCases { for i, tc := range testCases {
s := &testService{} s := &testService{}
c.service = s c.service = s
query := c.Query("") query := c.Query("")
query.QueryConfig = *tc.src query.QueryConfig = *tc.src
query.Dst = tc.dst query.Dst = tc.dst
if _, err := query.Run(context.Background()); err != nil { if _, err := query.Run(context.Background()); err != nil {
t.Errorf("err calling query: %v", err) t.Errorf("#%d: err calling query: %v", i, err)
continue continue
} }
if !testutil.Equal(s.Job, tc.want) { checkJob(t, i, s.Job, tc.want)
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
} }
} }
@ -304,3 +305,26 @@ func TestConfiguringQuery(t *testing.T) {
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want) t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want)
} }
} }
func TestQueryLegacySQL(t *testing.T) {
c := &Client{
projectID: "project-id",
service: &testService{},
}
q := c.Query("q")
q.UseStandardSQL = true
q.UseLegacySQL = true
_, err := q.Run(context.Background())
if err == nil {
t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
}
q = c.Query("q")
q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
q.UseLegacySQL = true
_, err = q.Run(context.Background())
if err == nil {
t.Error("Parameters and UseLegacySQL: got nil, want error")
} else {
fmt.Println(err)
}
}

View file

@ -15,13 +15,14 @@
package bigquery package bigquery
import ( import (
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"sync"
"time" "time"
"cloud.google.com/go/internal" "cloud.google.com/go/internal"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/version" "cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go" gax "github.com/googleapis/gax-go"
@ -48,7 +49,7 @@ type service interface {
// listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated. // listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error)
// Table data // Table data
readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
@ -58,6 +59,7 @@ type service interface {
insertDataset(ctx context.Context, datasetID, projectID string) error insertDataset(ctx context.Context, datasetID, projectID string) error
deleteDataset(ctx context.Context, datasetID, projectID string) error deleteDataset(ctx context.Context, datasetID, projectID string) error
getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)
patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error)
// Misc // Misc
@ -177,7 +179,6 @@ func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf
// Prepare request to fetch one page of table data. // Prepare request to fetch one page of table data.
req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID) req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
setClientHeader(req.Header()) setClientHeader(req.Header())
if pageToken != "" { if pageToken != "" {
req.PageToken(pageToken) req.PageToken(pageToken)
} else { } else {
@@ -189,33 +190,37 @@ func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf
     }
 
     // Fetch the table schema in the background, if necessary.
-    var schemaErr error
-    var schemaFetch sync.WaitGroup
-    if conf.schema == nil {
-        schemaFetch.Add(1)
+    errc := make(chan error, 1)
+    if conf.schema != nil {
+        errc <- nil
+    } else {
         go func() {
-            defer schemaFetch.Done()
             var t *bq.Table
-            t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
-                Fields("schema").
-                Context(ctx).
-                Do()
-            if schemaErr == nil && t.Schema != nil {
+            err := runWithRetry(ctx, func() (err error) {
+                t, err = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
+                    Fields("schema").
+                    Context(ctx).
+                    Do()
+                return err
+            })
+            if err == nil && t.Schema != nil {
                 conf.schema = convertTableSchema(t.Schema)
             }
+            errc <- err
         }()
     }
-    res, err := req.Context(ctx).Do()
+    var res *bq.TableDataList
+    err := runWithRetry(ctx, func() (err error) {
+        res, err = req.Context(ctx).Do()
+        return err
+    })
     if err != nil {
         return nil, err
     }
-    schemaFetch.Wait()
-    if schemaErr != nil {
-        return nil, schemaErr
+    err = <-errc
+    if err != nil {
+        return nil, err
     }
     result := &readDataResult{
         pageToken: res.PageToken,
         totalRows: uint64(res.TotalRows),
@@ -341,11 +346,12 @@ func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string
 
 func (s *bigqueryService) getJobInternal(ctx context.Context, projectID, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
     var job *bq.Job
+    call := s.s.Jobs.Get(projectID, jobID).
+        Fields(fields...).
+        Context(ctx)
+    setClientHeader(call.Header())
     err := runWithRetry(ctx, func() (err error) {
-        job, err = s.s.Jobs.Get(projectID, jobID).
-            Fields(fields...).
-            Context(ctx).
-            Do()
+        job, err = call.Do()
         return err
     })
     if err != nil {
@@ -360,11 +366,12 @@ func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string
     // docs: "This call will return immediately, and the client will need
     // to poll for the job status to see if the cancel completed
     // successfully". So it would be misleading to return a status.
+    call := s.s.Jobs.Cancel(projectID, jobID).
+        Fields(). // We don't need any of the response data.
+        Context(ctx)
+    setClientHeader(call.Header())
     return runWithRetry(ctx, func() error {
-        _, err := s.s.Jobs.Cancel(projectID, jobID).
-            Fields(). // We don't need any of the response data.
-            Context(ctx).
-            Do()
+        _, err := call.Do()
         return err
     })
 }
@ -495,6 +502,7 @@ type createTableConf struct {
viewQuery string viewQuery string
schema *bq.TableSchema schema *bq.TableSchema
useStandardSQL bool useStandardSQL bool
useLegacySQL bool
timePartitioning *TimePartitioning timePartitioning *TimePartitioning
} }
@ -504,6 +512,9 @@ type createTableConf struct {
// Note: expiration can only be set during table creation. // Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view. // Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error { func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
if conf.useStandardSQL && conf.useLegacySQL {
return errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
table := &bq.Table{ table := &bq.Table{
// TODO(jba): retry? Is this always idempotent? // TODO(jba): retry? Is this always idempotent?
TableReference: &bq.TableReference{ TableReference: &bq.TableReference{
@ -524,6 +535,9 @@ func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf
table.View.UseLegacySql = false table.View.UseLegacySql = false
table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql") table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql")
} }
if conf.useLegacySQL {
table.View.UseLegacySql = true
}
} }
if conf.schema != nil { if conf.schema != nil {
table.Schema = conf.schema table.Schema = conf.schema
@ -572,6 +586,7 @@ func bqTableToMetadata(t *bq.Table) *TableMetadata {
ExpirationTime: unixMillisToTime(t.ExpirationTime), ExpirationTime: unixMillisToTime(t.ExpirationTime),
CreationTime: unixMillisToTime(t.CreationTime), CreationTime: unixMillisToTime(t.CreationTime),
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)), LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
ETag: t.Etag,
} }
if t.Schema != nil { if t.Schema != nil {
md.Schema = convertTableSchema(t.Schema) md.Schema = convertTableSchema(t.Schema)
@ -605,6 +620,7 @@ func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
ID: d.Id, ID: d.Id,
Location: d.Location, Location: d.Location,
Labels: d.Labels, Labels: d.Labels,
ETag: d.Etag,
} }
} }
@@ -629,12 +645,13 @@ func convertTableReference(tr *bq.TableReference) *Table {
 // patchTableConf contains fields to be patched.
 type patchTableConf struct {
     // These fields are omitted from the patch operation if nil.
     Description *string
     Name        *string
     Schema      Schema
+    ExpirationTime time.Time
 }
 
-func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
+func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error) {
     t := &bq.Table{}
     forceSend := func(field string) {
         t.ForceSendFields = append(t.ForceSendFields, field)
@@ -652,9 +669,16 @@ func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID,
         t.Schema = conf.Schema.asTableSchema()
         forceSend("Schema")
     }
-    table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
-        Context(ctx).
-        Do()
+    if !conf.ExpirationTime.IsZero() {
+        t.ExpirationTime = conf.ExpirationTime.UnixNano() / 1e6
+        forceSend("ExpirationTime")
+    }
+    call := s.s.Tables.Patch(projectID, datasetID, tableID, t).Context(ctx)
+    setClientHeader(call.Header())
+    if etag != "" {
+        call.Header().Set("If-Match", etag)
+    }
+    table, err := call.Do()
     if err != nil {
         return nil, err
     }
@ -672,6 +696,58 @@ func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectI
return err return err
} }
func (s *bigqueryService) patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
ds := bqDatasetFromMetadata(dm)
call := s.s.Datasets.Patch(projectID, datasetID, ds).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
ds2, err := call.Do()
if err != nil {
return nil, err
}
return bqDatasetToMetadata(ds2), nil
}
func bqDatasetFromMetadata(dm *DatasetMetadataToUpdate) *bq.Dataset {
ds := &bq.Dataset{}
forceSend := func(field string) {
ds.ForceSendFields = append(ds.ForceSendFields, field)
}
if dm.Description != nil {
ds.Description = optional.ToString(dm.Description)
forceSend("Description")
}
if dm.Name != nil {
ds.FriendlyName = optional.ToString(dm.Name)
forceSend("FriendlyName")
}
if dm.DefaultTableExpiration != nil {
dur := optional.ToDuration(dm.DefaultTableExpiration)
if dur == 0 {
// Send a null to delete the field.
ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
} else {
ds.DefaultTableExpirationMs = int64(dur.Seconds() * 1000)
}
}
if dm.setLabels != nil || dm.deleteLabels != nil {
ds.Labels = map[string]string{}
for k, v := range dm.setLabels {
ds.Labels[k] = v
}
if len(ds.Labels) == 0 && len(dm.deleteLabels) > 0 {
forceSend("Labels")
}
for l := range dm.deleteLabels {
ds.NullFields = append(ds.NullFields, "Labels."+l)
}
}
return ds
}
func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error { func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
req := s.s.Datasets.Delete(projectID, datasetID).Context(ctx) req := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
setClientHeader(req.Header()) setClientHeader(req.Header())


@ -73,6 +73,7 @@ func TestBQTableToMetadata(t *testing.T) {
EstimatedRows: 3, EstimatedRows: 3,
OldestEntryTime: aTime, OldestEntryTime: aTime,
}, },
ETag: "etag",
}, },
}, },
} { } {
@ -82,3 +83,26 @@ func TestBQTableToMetadata(t *testing.T) {
} }
} }
} }
func TestBQDatasetFromMetadata(t *testing.T) {
dm := DatasetMetadataToUpdate{
Description: "desc",
Name: "name",
DefaultTableExpiration: time.Hour,
}
dm.SetLabel("label", "value")
dm.DeleteLabel("del")
got := bqDatasetFromMetadata(&dm)
want := &bq.Dataset{
Description: "desc",
FriendlyName: "name",
DefaultTableExpirationMs: 60 * 60 * 1000,
Labels: map[string]string{"label": "value"},
ForceSendFields: []string{"Description", "FriendlyName"},
NullFields: []string{"Labels.del"},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
}


@ -69,6 +69,10 @@ type TableMetadata struct {
// present. This field will be nil if the table is not being streamed to or if // present. This field will be nil if the table is not being streamed to or if
// there is no data in the streaming buffer. // there is no data in the streaming buffer.
StreamingBuffer *StreamingBuffer StreamingBuffer *StreamingBuffer
// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
} }
// TableCreateDisposition specifies the circumstances under which destination table will be created. // TableCreateDisposition specifies the circumstances under which destination table will be created.
@ -144,6 +148,8 @@ func (t *Table) implicitTable() bool {
} }
// Create creates a table in the BigQuery service. // Create creates a table in the BigQuery service.
// To create a table with a schema, pass in a Schema to Create;
// Schema is a valid CreateTableOption.
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error { func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error {
conf := &createTableConf{ conf := &createTableConf{
projectID: t.ProjectID, projectID: t.ProjectID,
@ -200,6 +206,16 @@ func (opt useStandardSQL) customizeCreateTable(conf *createTableConf) {
conf.useStandardSQL = true conf.useStandardSQL = true
} }
type useLegacySQL struct{}
// UseLegacySQL returns a CreateTableOption to set the table to use legacy SQL.
// This is currently the default.
func UseLegacySQL() CreateTableOption { return useLegacySQL{} }
func (opt useLegacySQL) customizeCreateTable(conf *createTableConf) {
conf.useLegacySQL = true
}
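As a usage sketch only (not part of the diff): assuming an existing *bigquery.Client, and assuming this version's ViewQuery CreateTableOption is available for the view definition, the new option could be used as below. Passing UseStandardSQL together with UseLegacySQL now makes createTable return "bigquery: cannot provide both UseStandardSQL and UseLegacySQL".

package bqexample

import (
    "cloud.google.com/go/bigquery"
    "golang.org/x/net/context"
)

// createLegacyView is a hypothetical helper: it creates a view whose query is
// interpreted with legacy SQL semantics, which this package still treats as
// the default.
func createLegacyView(ctx context.Context, client *bigquery.Client) error {
    t := client.Dataset("mydataset").Table("myview")
    return t.Create(ctx,
        bigquery.ViewQuery("SELECT 17 AS answer"), // assumed existing option for view definitions
        bigquery.UseLegacySQL())
}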
// TimePartitioning is a CreateTableOption that can be used to set time-based // TimePartitioning is a CreateTableOption that can be used to set time-based
// date partitioning on a table. // date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables // For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables
@ -223,7 +239,7 @@ func (t *Table) Read(ctx context.Context) *RowIterator {
} }
// Update modifies specific Table metadata fields. // Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) { func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
var conf patchTableConf var conf patchTableConf
if tm.Description != nil { if tm.Description != nil {
s := optional.ToString(tm.Description) s := optional.ToString(tm.Description)
@ -234,7 +250,8 @@ func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMet
conf.Name = &s conf.Name = &s
} }
conf.Schema = tm.Schema conf.Schema = tm.Schema
return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf) conf.ExpirationTime = tm.ExpirationTime
return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf, etag)
} }
// TableMetadataToUpdate is used when updating a table's metadata. // TableMetadataToUpdate is used when updating a table's metadata.
@ -250,4 +267,7 @@ type TableMetadataToUpdate struct {
// When updating a schema, you can add columns but not remove them. // When updating a schema, you can add columns but not remove them.
Schema Schema Schema Schema
// TODO(jba): support updating the view // TODO(jba): support updating the view
// ExpirationTime is the time when this table expires.
ExpirationTime time.Time
} }
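A sketch of how the new etag parameter could be used for optimistic concurrency (illustrative only; tbl is assumed to be a *bigquery.Table, and Metadata is assumed to return the TableMetadata that now carries ETag). Passing an empty etag leaves the update unconditional, since the If-Match header is only set for a non-empty value.

package bqexample

import (
    "time"

    "cloud.google.com/go/bigquery"
    "golang.org/x/net/context"
)

// renameIfUnchanged updates the table only if its metadata has not changed
// since it was read; otherwise the If-Match precondition fails.
func renameIfUnchanged(ctx context.Context, tbl *bigquery.Table) error {
    md, err := tbl.Metadata(ctx)
    if err != nil {
        return err
    }
    _, err = tbl.Update(ctx, bigquery.TableMetadataToUpdate{
        Name:           "new friendly name",
        ExpirationTime: time.Now().Add(24 * time.Hour), // uses the field added above
    }, md.ETag)
    return err
}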


@ -30,6 +30,8 @@ import (
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/grpc/codes"
) )
const adminAddr = "bigtableadmin.googleapis.com:443" const adminAddr = "bigtableadmin.googleapis.com:443"
@ -379,6 +381,11 @@ func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo,
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(res.FailedLocations) > 0 {
// We don't have a good way to return a partial result in the face of some zones being unavailable.
// Fail the entire request.
return nil, status.Errorf(codes.Unavailable, "Failed locations: %v", res.FailedLocations)
}
var is []*InstanceInfo var is []*InstanceInfo
for _, i := range res.Instances { for _, i := range res.Instances {
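A sketch of how a caller might react to the new behavior (illustrative; iac is assumed to be an existing *bigtable.InstanceAdminClient): when some zones cannot be queried, the whole call now fails with codes.Unavailable instead of silently returning a partial list.

package btexample

import (
    "fmt"

    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// listInstances surfaces the "failed locations" case explicitly so callers do
// not mistake an incomplete listing for a complete one.
func listInstances(ctx context.Context, iac *bigtable.InstanceAdminClient) ([]*bigtable.InstanceInfo, error) {
    is, err := iac.Instances(ctx)
    if err != nil {
        if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
            return nil, fmt.Errorf("instance listing incomplete, retry later: %v", err)
        }
        return nil, err
    }
    return is, nil
}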


@ -53,7 +53,13 @@ func NewClient(ctx context.Context, project, instance string, opts ...option.Cli
return nil, err return nil, err
} }
// Default to a small connection pool that can be overridden. // Default to a small connection pool that can be overridden.
o = append(o, option.WithGRPCConnectionPool(4)) o = append(o,
option.WithGRPCConnectionPool(4),
// Set the max size to correspond to server-side limits.
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))),
// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
option.WithGRPCDialOption(grpc.WithBlock()))
o = append(o, opts...) o = append(o, opts...)
conn, err := gtransport.Dial(ctx, o...) conn, err := gtransport.Dial(ctx, o...)
if err != nil { if err != nil {
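Because the caller's options are appended after these defaults, a caller can still adjust them; a sketch, under the assumption that a later option wins for the pool-size setting since options are applied in order:

package btexample

import (
    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
    "google.golang.org/api/option"
)

// newWidePoolClient keeps the larger message limits set above but asks for a
// bigger connection pool than the default of 4.
func newWidePoolClient(ctx context.Context, project, instance string) (*bigtable.Client, error) {
    return bigtable.NewClient(ctx, project, instance, option.WithGRPCConnectionPool(8))
}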
@ -211,6 +217,7 @@ func decodeFamilyProto(r Row, row string, f *btpb.Family) {
} }
// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList. // RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
// The serialized size of the RowSet must be no larger than 1MiB.
type RowSet interface { type RowSet interface {
proto() *btpb.RowSet proto() *btpb.RowSet
@ -391,6 +398,9 @@ type ReadOption interface {
} }
// RowFilter returns a ReadOption that applies f to the contents of read rows. // RowFilter returns a ReadOption that applies f to the contents of read rows.
//
// If multiple RowFilters are provided, only the last is used. To combine filters,
// use ChainFilters or InterleaveFilters instead.
func RowFilter(f Filter) ReadOption { return rowFilter{f} } func RowFilter(f Filter) ReadOption { return rowFilter{f} }
type rowFilter struct{ f Filter } type rowFilter struct{ f Filter }
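A short sketch of the combination the new comment recommends (illustrative; tbl is assumed to be a *bigtable.Table with a "follows" column family):

package btexample

import (
    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
)

// readLatestFollows reads every row but keeps only the newest cell of each
// column in the "follows" family, using a single combined filter instead of
// two RowFilter options (only the last of which would take effect).
func readLatestFollows(ctx context.Context, tbl *bigtable.Table) error {
    f := bigtable.ChainFilters(
        bigtable.FamilyFilter("follows"),
        bigtable.LatestNFilter(1),
    )
    return tbl.ReadRows(ctx, bigtable.PrefixRange(""), func(r bigtable.Row) bool {
        // process r here
        return true
    }, bigtable.RowFilter(f))
}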
@ -571,7 +581,7 @@ type entryErr struct {
Err error Err error
} }
// ApplyBulk applies multiple Mutations. // ApplyBulk applies multiple Mutations, up to a maximum of 100,000.
// Each mutation is individually applied atomically, // Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order. // but the set of mutations may be applied in any order.
// //
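A sketch of a bulk write that stays under that limit (illustrative; tbl is assumed to be a *bigtable.Table with a column family named "cf"):

package btexample

import (
    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
)

// applyBatch writes one cell per row key. rowKeys and muts must have the same
// length, and a single ApplyBulk call may carry at most 100,000 mutations.
func applyBatch(ctx context.Context, tbl *bigtable.Table, rowKeys []string) error {
    muts := make([]*bigtable.Mutation, len(rowKeys))
    for i := range rowKeys {
        m := bigtable.NewMutation()
        m.Set("cf", "col", bigtable.Now(), []byte("value"))
        muts[i] = m
    }
    errs, err := tbl.ApplyBulk(ctx, rowKeys, muts)
    if err != nil {
        return err // the request as a whole failed
    }
    for _, e := range errs {
        if e != nil {
            return e // at least one individual mutation failed
        }
    }
    return nil
}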


@ -448,6 +448,24 @@ func TestClientIntegration(t *testing.T) {
t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
} }
} }
// Check for google-cloud-go/issues/723. RMWs that insert new rows should keep row order sorted in the emulator.
row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-2", appendRMW([]byte{0}))
if err != nil {
t.Fatalf("ApplyReadModifyWrite null string: %v", err)
}
row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-1", appendRMW([]byte{0}))
if err != nil {
t.Fatalf("ApplyReadModifyWrite null string: %v", err)
}
// Get only the correct row back on read.
r, err := tbl.ReadRow(ctx, "issue-723-1")
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if r.Key() != "issue-723-1" {
t.Errorf("ApplyReadModifyWrite: incorrect read after RMW,\n got %v\nwant %v", r.Key(), "issue-723-1")
}
checkpoint("tested ReadModifyWrite") checkpoint("tested ReadModifyWrite")
// Test arbitrary timestamps more thoroughly. // Test arbitrary timestamps more thoroughly.
@ -465,7 +483,7 @@ func TestClientIntegration(t *testing.T) {
if err := tbl.Apply(ctx, "testrow", mut); err != nil { if err := tbl.Apply(ctx, "testrow", mut); err != nil {
t.Fatalf("Mutating row: %v", err) t.Fatalf("Mutating row: %v", err)
} }
r, err := tbl.ReadRow(ctx, "testrow") r, err = tbl.ReadRow(ctx, "testrow")
if err != nil { if err != nil {
t.Fatalf("Reading row: %v", err) t.Fatalf("Reading row: %v", err)
} }
@ -758,7 +776,7 @@ func TestClientIntegration(t *testing.T) {
checkpoint("tested high concurrency") checkpoint("tested high concurrency")
// Large reads, writes and scans. // Large reads, writes and scans.
bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB. bigBytes := make([]byte, 5<<20) // 5 MB is larger than current default gRPC max of 4 MB, but less than the max we set.
nonsense := []byte("lorem ipsum dolor sit amet, ") nonsense := []byte("lorem ipsum dolor sit amet, ")
fill(bigBytes, nonsense) fill(bigBytes, nonsense)
mut = NewMutation() mut = NewMutation()


@ -277,7 +277,6 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRo
// Rows to read can be specified by a set of row keys and/or a set of row ranges. // Rows to read can be specified by a set of row keys and/or a set of row ranges.
// Output is a stream of sorted, de-duped rows. // Output is a stream of sorted, de-duped rows.
tbl.mu.RLock() tbl.mu.RLock()
rowSet := make(map[string]*row) rowSet := make(map[string]*row)
if req.Rows != nil { if req.Rows != nil {
// Add the explicitly given keys // Add the explicitly given keys
@ -624,9 +623,8 @@ func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*bt
if !ok { if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
} }
fs := tbl.columnFamilies() fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey)) r, _ := tbl.mutableRow(string(req.RowKey))
r.mu.Lock() r.mu.Lock()
defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock
defer r.mu.Unlock() defer r.mu.Unlock()
@ -643,14 +641,13 @@ func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_Mu
if !ok { if !ok {
return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
} }
res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))} res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))}
fs := tbl.columnFamilies() fs := tbl.columnFamilies()
defer tbl.resortRowIndex() defer tbl.resortRowIndex()
for i, entry := range req.Entries { for i, entry := range req.Entries {
r := tbl.mutableRow(string(entry.RowKey)) r, _ := tbl.mutableRow(string(entry.RowKey))
r.mu.Lock() r.mu.Lock()
code, msg := int32(codes.OK), "" code, msg := int32(codes.OK), ""
if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil { if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil {
@ -674,12 +671,11 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate
if !ok { if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
} }
res := &btpb.CheckAndMutateRowResponse{} res := &btpb.CheckAndMutateRowResponse{}
fs := tbl.columnFamilies() fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey)) r, _ := tbl.mutableRow(string(req.RowKey))
r.mu.Lock() r.mu.Lock()
defer r.mu.Unlock() defer r.mu.Unlock()
@ -830,12 +826,16 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
if !ok { if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
} }
updates := make(map[string]cell) // copy of updated cells; keyed by full column name updates := make(map[string]cell) // copy of updated cells; keyed by full column name
fs := tbl.columnFamilies() fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey)) rowKey := string(req.RowKey)
r, isNewRow := tbl.mutableRow(rowKey)
// This must be done before the row lock, acquired below, is released.
if isNewRow {
defer tbl.resortRowIndex()
}
r.mu.Lock() r.mu.Lock()
defer r.mu.Unlock() defer r.mu.Unlock()
// Assume all mutations apply to the most recent version of the cell. // Assume all mutations apply to the most recent version of the cell.
@ -1025,13 +1025,13 @@ func (t *table) columnFamilies() map[string]*columnFamily {
return cp return cp
} }
func (t *table) mutableRow(row string) *row { func (t *table) mutableRow(row string) (mutRow *row, isNewRow bool) {
// Try fast path first. // Try fast path first.
t.mu.RLock() t.mu.RLock()
r := t.rowIndex[row] r := t.rowIndex[row]
t.mu.RUnlock() t.mu.RUnlock()
if r != nil { if r != nil {
return r return r, false
} }
// We probably need to create the row. // We probably need to create the row.
@ -1043,7 +1043,7 @@ func (t *table) mutableRow(row string) *row {
t.rows = append(t.rows, r) t.rows = append(t.rows, r)
} }
t.mu.Unlock() t.mu.Unlock()
return r return r, true
} }
func (t *table) resortRowIndex() { func (t *table) resortRowIndex() {


@ -36,6 +36,7 @@ import (
"cloud.google.com/go/bigtable/internal/stat" "cloud.google.com/go/bigtable/internal/stat"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/api/option" "google.golang.org/api/option"
"google.golang.org/grpc"
) )
var ( var (
@ -74,7 +75,12 @@ func main() {
var options []option.ClientOption var options []option.ClientOption
if *poolSize > 1 { if *poolSize > 1 {
options = append(options, option.WithGRPCConnectionPool(*poolSize)) options = append(options,
option.WithGRPCConnectionPool(*poolSize),
// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
option.WithGRPCDialOption(grpc.WithBlock()))
} }
var csvFile *os.File var csvFile *os.File


@ -98,7 +98,7 @@ type EmulatedEnv struct {
// NewEmulatedEnv builds and starts the emulator based environment // NewEmulatedEnv builds and starts the emulator based environment
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) { func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
srv, err := bttest.NewServer("127.0.0.1:0") srv, err := bttest.NewServer("127.0.0.1:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -152,7 +152,7 @@ func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
func (e *EmulatedEnv) NewClient() (*Client, error) { func (e *EmulatedEnv) NewClient() (*Client, error) {
timeout := 20 * time.Second timeout := 20 * time.Second
ctx, _ := context.WithTimeout(context.Background(), timeout) ctx, _ := context.WithTimeout(context.Background(), timeout)
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure()) conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// +build linux // +build linux,go1.7
package main package main

vendor/cloud.google.com/go/datastore/client.go: new file, 118 lines (generated, vendored)

@ -0,0 +1,118 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
gax "github.com/googleapis/gax-go"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
// Embed so we still implement the DatastoreClient interface,
// if the interface adds more methods.
pb.DatastoreClient
c pb.DatastoreClient
md metadata.MD
}
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
return &datastoreClient{
c: pb.NewDatastoreClient(conn),
md: metadata.Pairs(
resourcePrefixHeader, "projects/"+projectID,
"x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)),
}
}
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (res *pb.LookupResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Lookup(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (res *pb.RunQueryResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.RunQuery(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (res *pb.BeginTransactionResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.BeginTransaction(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (res *pb.CommitResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Commit(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (res *pb.RollbackResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Rollback(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (res *pb.AllocateIdsResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.AllocateIds(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) invoke(ctx context.Context, f func(ctx context.Context) error) error {
ctx = metadata.NewOutgoingContext(ctx, dc.md)
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
err = f(ctx)
return !shouldRetry(err), err
})
}
func shouldRetry(err error) bool {
if err == nil {
return false
}
s, ok := status.FromError(err)
if !ok {
return false
}
// See https://cloud.google.com/datastore/docs/concepts/errors.
return s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded
}


@ -21,15 +21,11 @@ import (
"os" "os"
"reflect" "reflect"
"cloud.google.com/go/internal/version"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/api/option" "google.golang.org/api/option"
gtransport "google.golang.org/api/transport/grpc" gtransport "google.golang.org/api/transport/grpc"
pb "google.golang.org/genproto/googleapis/datastore/v1" pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/metadata"
) )
const ( const (
@ -44,56 +40,6 @@ const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
// the resource being operated on. // the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix" const resourcePrefixHeader = "google-cloud-resource-prefix"
// protoClient is an interface for *transport.ProtoClient to support injecting
// fake clients in tests.
type protoClient interface {
Call(context.Context, string, proto.Message, proto.Message) error
}
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
// Embed so we still implement the DatastoreClient interface,
// if the interface adds more methods.
pb.DatastoreClient
c pb.DatastoreClient
md metadata.MD
}
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
return &datastoreClient{
c: pb.NewDatastoreClient(conn),
md: metadata.Pairs(
resourcePrefixHeader, "projects/"+projectID,
"x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)),
}
}
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
return dc.c.Lookup(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
return dc.c.RunQuery(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
return dc.c.BeginTransaction(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
return dc.c.Commit(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
return dc.c.Rollback(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
return dc.c.AllocateIds(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
// Client is a client for reading and writing data in a datastore dataset. // Client is a client for reading and writing data in a datastore dataset.
type Client struct { type Client struct {
conn *grpc.ClientConn conn *grpc.ClientConn
@ -201,7 +147,6 @@ func keyToProto(k *Key) *pb.Key {
return nil return nil
} }
// TODO(jbd): Eliminate unrequired allocations.
var path []*pb.Key_PathElement var path []*pb.Key_PathElement
for { for {
el := &pb.Key_PathElement{Kind: k.Kind} el := &pb.Key_PathElement{Kind: k.Kind}
@ -210,12 +155,19 @@ func keyToProto(k *Key) *pb.Key {
} else if k.Name != "" { } else if k.Name != "" {
el.IdType = &pb.Key_PathElement_Name{Name: k.Name} el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
} }
path = append([]*pb.Key_PathElement{el}, path...) path = append(path, el)
if k.Parent == nil { if k.Parent == nil {
break break
} }
k = k.Parent k = k.Parent
} }
// The path should be in order [grandparent, parent, child]
// We did it backward above, so reverse back.
for i := 0; i < len(path)/2; i++ {
path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
}
key := &pb.Key{Path: path} key := &pb.Key{Path: path}
if k.Namespace != "" { if k.Namespace != "" {
key.PartitionId = &pb.PartitionId{ key.PartitionId = &pb.PartitionId{
@@ -390,17 +342,21 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
         return nil
     }
 
-    // Go through keys, validate them, serialize then, and create a dict mapping them to their index
+    // Go through keys, validate them, serialize then, and create a dict mapping them to their indices.
+    // Equal keys are deduped.
     multiErr, any := make(MultiError, len(keys)), false
-    keyMap := make(map[string]int)
-    pbKeys := make([]*pb.Key, len(keys))
+    keyMap := make(map[string][]int, len(keys))
+    pbKeys := make([]*pb.Key, 0, len(keys))
     for i, k := range keys {
         if !k.valid() {
             multiErr[i] = ErrInvalidKey
             any = true
         } else {
-            keyMap[k.String()] = i
-            pbKeys[i] = keyToProto(k)
+            ks := k.String()
+            if _, ok := keyMap[ks]; !ok {
+                pbKeys = append(pbKeys, keyToProto(k))
+            }
+            keyMap[ks] = append(keyMap[ks], i)
         }
     }
     if any {
@ -434,25 +390,26 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
found = append(found, resp.Found...) found = append(found, resp.Found...)
missing = append(missing, resp.Missing...) missing = append(missing, resp.Missing...)
} }
if len(keys) != len(found)+len(missing) {
return errors.New("datastore: internal error: server returned the wrong number of entities") filled := 0
}
for _, e := range found { for _, e := range found {
k, err := protoToKey(e.Entity.Key) k, err := protoToKey(e.Entity.Key)
if err != nil { if err != nil {
return errors.New("datastore: internal error: server returned an invalid key") return errors.New("datastore: internal error: server returned an invalid key")
} }
index := keyMap[k.String()] filled += len(keyMap[k.String()])
elem := v.Index(index) for _, index := range keyMap[k.String()] {
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { elem := v.Index(index)
elem = elem.Addr() if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
} elem = elem.Addr()
if multiArgType == multiArgTypeStructPtr && elem.IsNil() { }
elem.Set(reflect.New(elem.Type().Elem())) if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
} elem.Set(reflect.New(elem.Type().Elem()))
if err := loadEntityProto(elem.Interface(), e.Entity); err != nil { }
multiErr[index] = err if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
any = true multiErr[index] = err
any = true
}
} }
} }
for _, e := range missing { for _, e := range missing {
@ -460,9 +417,17 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
if err != nil { if err != nil {
return errors.New("datastore: internal error: server returned an invalid key") return errors.New("datastore: internal error: server returned an invalid key")
} }
multiErr[keyMap[k.String()]] = ErrNoSuchEntity filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
multiErr[index] = ErrNoSuchEntity
}
any = true any = true
} }
if filled != len(keys) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
if any { if any {
return multiErr return multiErr
} }
@ -592,13 +557,18 @@ func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
func deleteMutations(keys []*Key) ([]*pb.Mutation, error) { func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
mutations := make([]*pb.Mutation, 0, len(keys)) mutations := make([]*pb.Mutation, 0, len(keys))
set := make(map[string]bool, len(keys))
for _, k := range keys { for _, k := range keys {
if k.Incomplete() { if k.Incomplete() {
return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
} }
mutations = append(mutations, &pb.Mutation{ ks := k.String()
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}, if !set[ks] {
}) mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
})
}
set[ks] = true
} }
return mutations, nil return mutations, nil
} }
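A sketch of what the deduplication above allows at the API surface (illustrative; client is assumed to be an existing *datastore.Client, and Task is a placeholder struct):

package dsexample

import (
    "cloud.google.com/go/datastore"
    "golang.org/x/net/context"
)

// Task is a placeholder entity type.
type Task struct {
    Note string
}

// duplicateKeys shows that repeated keys are now accepted: GetMulti fills
// every slot of dst that maps to the same key, and DeleteMulti collapses the
// repeats into a single delete mutation.
func duplicateKeys(ctx context.Context, client *datastore.Client) error {
    k := datastore.NameKey("Task", "t1", nil)
    dst := make([]Task, 2)
    if err := client.GetMulti(ctx, []*datastore.Key{k, k}, dst); err != nil {
        return err
    }
    return client.DeleteMulti(ctx, []*datastore.Key{k, k})
}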


@ -170,6 +170,7 @@ func TestGetMulti(t *testing.T) {
{key: NameKey("X", "item1", p), put: true}, {key: NameKey("X", "item1", p), put: true},
{key: NameKey("X", "item2", p), put: false}, {key: NameKey("X", "item2", p), put: false},
{key: NameKey("X", "item3", p), put: false}, {key: NameKey("X", "item3", p), put: false},
{key: NameKey("X", "item3", p), put: false},
{key: NameKey("X", "item4", p), put: true}, {key: NameKey("X", "item4", p), put: true},
} }
@ -1003,6 +1004,8 @@ func TestNilPointers(t *testing.T) {
t.Errorf("Get: err %v; want %v", err, want) t.Errorf("Get: err %v; want %v", err, want)
} }
// Test that deleting with duplicate keys work.
keys = append(keys, keys...)
if err := client.DeleteMulti(ctx, keys); err != nil { if err := client.DeleteMulti(ctx, keys); err != nil {
t.Errorf("Delete: %v", err) t.Errorf("Delete: %v", err)
} }


@ -88,10 +88,10 @@ type Controller2Client struct {
// //
// The debugger agents register with the Controller to identify the application // The debugger agents register with the Controller to identify the application
// being debugged, the Debuggee. All agents that register with the same data, // being debugged, the Debuggee. All agents that register with the same data,
// represent the same Debuggee, and are assigned the same `debuggee_id`. // represent the same Debuggee, and are assigned the same debuggee_id.
// //
// The debugger agents call the Controller to retrieve the list of active // The debugger agents call the Controller to retrieve the list of active
// Breakpoints. Agents with the same `debuggee_id` get the same breakpoints // Breakpoints. Agents with the same debuggee_id get the same breakpoints
// list. An agent that can fulfill the breakpoint request updates the // list. An agent that can fulfill the breakpoint request updates the
// Controller with the breakpoint result. The controller selects the first // Controller with the breakpoint result. The controller selects the first
// result received and discards the rest of the results. // result received and discards the rest of the results.
@ -139,14 +139,14 @@ func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
// RegisterDebuggee registers the debuggee with the controller service. // RegisterDebuggee registers the debuggee with the controller service.
// //
// All agents attached to the same application should call this method with // All agents attached to the same application must call this method with
// the same request content to get back the same stable `debuggee_id`. Agents // exactly the same request content to get back the same stable debuggee_id.
// should call this method again whenever `google.rpc.Code.NOT_FOUND` is // Agents should call this method again whenever google.rpc.Code.NOT_FOUND
// returned from any controller method. // is returned from any controller method.
// //
// This allows the controller service to disable the agent or recover from any // This protocol allows the controller service to disable debuggees, recover
// data loss. If the debuggee is disabled by the server, the response will // from data loss, or change the debuggee_id format. Agents must handle
// have `is_disabled` set to `true`. // debuggee_id value changing upon re-registration.
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...) opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
@ -164,7 +164,7 @@ func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebu
// ListActiveBreakpoints returns the list of all active breakpoints for the debuggee. // ListActiveBreakpoints returns the list of all active breakpoints for the debuggee.
// //
// The breakpoint specification (location, condition, and expression // The breakpoint specification (location, condition, and expressions
// fields) is semantically immutable, although the field values may // fields) is semantically immutable, although the field values may
// change. For example, an agent may update the location line number // change. For example, an agent may update the location line number
// to reflect the actual line where the breakpoint was set, but this // to reflect the actual line where the breakpoint was set, but this
@ -191,12 +191,11 @@ func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clou
} }
// UpdateActiveBreakpoint updates the breakpoint state or mutable fields. // UpdateActiveBreakpoint updates the breakpoint state or mutable fields.
// The entire Breakpoint message must be sent back to the controller // The entire Breakpoint message must be sent back to the controller service.
// service.
// //
// Updates to active breakpoint fields are only allowed if the new value // Updates to active breakpoint fields are only allowed if the new value
// does not change the breakpoint specification. Updates to the `location`, // does not change the breakpoint specification. Updates to the location,
// `condition` and `expression` fields should not alter the breakpoint // condition and expressions fields should not alter the breakpoint
// semantics. These may only make changes such as canonicalizing a value // semantics. These may only make changes such as canonicalizing a value
// or snapping the location to the correct line of code. // or snapping the location to the correct line of code.
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {


@ -91,9 +91,9 @@ type Debugger2Client struct {
// and without modifying its state. An application may include one or // and without modifying its state. An application may include one or
// more replicated processes performing the same work. // more replicated processes performing the same work.
// //
// The application is represented using the Debuggee concept. The Debugger // A debugged application is represented using the Debuggee concept. The
// service provides a way to query for available Debuggees, but does not // Debugger service provides a way to query for available debuggees, but does
// provide a way to create one. A debuggee is created using the Controller // not provide a way to create one. A debuggee is created using the Controller
// service, usually by running a debugger agent with the application. // service, usually by running a debugger agent with the application.
// //
// The Debugger service enables the client to set one or more Breakpoints on a // The Debugger service enables the client to set one or more Breakpoints on a
@ -193,7 +193,7 @@ func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebugge
return resp, nil return resp, nil
} }
// ListDebuggees lists all the debuggees that the user can set breakpoints to. // ListDebuggees lists all the debuggees that the user has access to.
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) { func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...) opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)


@ -35,8 +35,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md) return metadata.NewOutgoingContext(ctx, md)
} }
// DefaultAuthScopes reports the authentication scopes required // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
// by this package.
func DefaultAuthScopes() []string { func DefaultAuthScopes() []string {
return []string{ return []string{
"https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform",


@ -113,7 +113,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
client: dlppb.NewDlpServiceClient(conn), client: dlppb.NewDlpServiceClient(conn),
} }
c.SetGoogleClientInfo() c.setGoogleClientInfo()
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil { if err != nil {
@ -139,10 +139,10 @@ func (c *Client) Close() error {
return c.conn.Close() return c.conn.Close()
} }
// SetGoogleClientInfo sets the name and version of the application in // setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for // the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients. // use by Google-written clients.
func (c *Client) SetGoogleClientInfo(keyval ...string) { func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -275,7 +275,7 @@ func (c *Client) CreateInspectOperationHandle(name string) *CreateInspectOperati
// See documentation of Poll for error-handling information. // See documentation of Poll for error-handling information.
func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) { func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
var resp dlppb.InspectOperationResult var resp dlppb.InspectOperationResult
if err := op.lro.Wait(ctx, &resp, opts...); err != nil { if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
return nil, err return nil, err
} }
return &resp, nil return &resp, nil


@ -34,8 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md) return metadata.NewOutgoingContext(ctx, md)
} }
// DefaultAuthScopes reports the authentication scopes required // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
// by this package.
func DefaultAuthScopes() []string { func DefaultAuthScopes() []string {
return []string{ return []string{
"https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform",


@ -20,6 +20,8 @@
// Stackdriver Error Reporting groups and counts similar errors from cloud // Stackdriver Error Reporting groups and counts similar errors from cloud
// services. The Stackdriver Error Reporting API provides a way to report new // services. The Stackdriver Error Reporting API provides a way to report new
// errors and read access to error groups and their associated errors. // errors and read access to error groups and their associated errors.
//
// Use the client at cloud.google.com/go/errorreporting in preference to this.
package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1" package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1"
import ( import (
@ -34,8 +36,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md) return metadata.NewOutgoingContext(ctx, md)
} }
// DefaultAuthScopes reports the authentication scopes required // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
// by this package.
func DefaultAuthScopes() []string { func DefaultAuthScopes() []string {
return []string{ return []string{
"https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform",


@ -29,10 +29,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
errorGroupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}")
)
// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient. // ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient.
type ErrorGroupCallOptions struct { type ErrorGroupCallOptions struct {
GetGroup []gax.CallOption GetGroup []gax.CallOption
@@ -122,14 +118,12 @@ func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
 
 // ErrorGroupGroupPath returns the path for the group resource.
 func ErrorGroupGroupPath(project, group string) string {
-    path, err := errorGroupGroupPathTemplate.Render(map[string]string{
-        "project": project,
-        "group":   group,
-    })
-    if err != nil {
-        panic(err)
-    }
-    return path
+    return "" +
+        "projects/" +
+        project +
+        "/groups/" +
+        group +
+        ""
 }
 
 // GetGroup get the specified group.
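For example, with the path template gone, ErrorGroupGroupPath("my-project", "abc123") simply concatenates to "projects/my-project/groups/abc123" (placeholder values); the other generated path helpers below are rewritten the same way.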


@ -31,10 +31,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
errorStatsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
)
// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient. // ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient.
type ErrorStatsCallOptions struct { type ErrorStatsCallOptions struct {
ListGroupStats []gax.CallOption ListGroupStats []gax.CallOption
@ -127,13 +123,10 @@ func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
// ErrorStatsProjectPath returns the path for the project resource. // ErrorStatsProjectPath returns the path for the project resource.
func ErrorStatsProjectPath(project string) string { func ErrorStatsProjectPath(project string) string {
path, err := errorStatsProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// ListGroupStats lists the specified groups. // ListGroupStats lists the specified groups.


@ -26,10 +26,6 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
) )
var (
reportErrorsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
)
// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient. // ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient.
type ReportErrorsCallOptions struct { type ReportErrorsCallOptions struct {
ReportErrorEvent []gax.CallOption ReportErrorEvent []gax.CallOption
@ -104,13 +100,10 @@ func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
// ReportErrorsProjectPath returns the path for the project resource. // ReportErrorsProjectPath returns the path for the project resource.
func ReportErrorsProjectPath(project string) string { func ReportErrorsProjectPath(project string) string {
path, err := reportErrorsProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// ReportErrorEvent report an individual error event. // ReportErrorEvent report an individual error event.
@ -119,8 +112,7 @@ func ReportErrorsProjectPath(project string) string {
// <strong>or</strong> an // <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a> // <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of // for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example: // a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) { func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...) opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)


@ -0,0 +1,215 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import (
"bytes"
"errors"
"log"
"strings"
"testing"
"cloud.google.com/go/logging"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
type fakeLogger struct {
entry *logging.Entry
fail bool
}
func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
if c.fail {
return errors.New("request failed")
}
c.entry = &e
return nil
}
func (c *fakeLogger) Close() error {
return nil
}
func newTestClientUsingLogging(c *fakeLogger) *Client {
newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}
func TestCatchNothingUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func entryMessage(e *logging.Entry) string {
return e.Payload.(map[string]interface{})["message"].(string)
}
func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) {
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" {
t.Errorf("error report didn't contain service name")
}
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(entryMessage(e), "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(entryMessage(e), panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchPanic")
if !strings.Contains(entryMessage(e), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClientUsingLogging(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReportsUsingLogging(t *testing.T) {
fl := &fakeLogger{fail: true}
c := newTestClientUsingLogging(fl)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchNilPanic")
if !strings.Contains(entryMessage(e), "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReportUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Report(ctx, nil, "hello, ", "error")
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReport")
}
func TestReportfUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReportf")
if !strings.Contains(entryMessage(e), "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}
func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}

vendor/cloud.google.com/go/errorreporting/errors.go generated vendored Normal file

@@ -0,0 +1,456 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package errorreporting is a Google Stackdriver Error Reporting library.
//
// This package is still experimental and subject to change.
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errorreporting"
// ...
// errorsClient, err = errorreporting.NewClient(ctx, projectID, "myservice", "v1.0", true)
//
// The client can recover panics in your program and report them as errors.
// To use this functionality, defer its Catch method, as you would any other
// function for recovering panics.
//
// func foo(ctx context.Context, ...) {
// defer errorsClient.Catch(ctx)
// ...
// }
//
// Catch writes an error report containing the recovered value and a stack trace
// to Stackdriver Error Reporting.
//
// There are various options you can add to the call to Catch that modify how
// panics are handled.
//
// WithMessage and WithMessagef add a custom message after the recovered value,
// using fmt.Sprint and fmt.Sprintf respectively.
//
// defer errorsClient.Catch(ctx, errorreporting.WithMessagef("x=%d", x))
//
// WithRequest fills in various fields in the error report with information
// about an http.Request that's being handled.
//
// defer errorsClient.Catch(ctx, errorreporting.WithRequest(httpReq))
//
// By default, after recovering a panic, Catch will panic again with the
// recovered value. You can turn off this behavior with the Repanic option.
//
// defer errorsClient.Catch(ctx, errorreporting.Repanic(false))
//
// You can also change the default behavior for the client by changing the
// RepanicDefault field.
//
// errorsClient.RepanicDefault = false
//
// It is also possible to write an error report directly without recovering a
// panic, using Report or Reportf.
//
// if err != nil {
// errorsClient.Reportf(ctx, r, "unexpected error %v", err)
// }
//
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
package errorreporting // import "cloud.google.com/go/errorreporting"
import (
"bytes"
"fmt"
"log"
"net/http"
"runtime"
"strings"
"time"
api "cloud.google.com/go/errorreporting/apiv1beta1"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/logging"
"github.com/golang/protobuf/ptypes/timestamp"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
const (
userAgent = `gcloud-golang-errorreporting/20160701`
)
type apiInterface interface {
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
Close() error
}
var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
client, err := api.NewReportErrorsClient(ctx, opts...)
if err != nil {
return nil, err
}
client.SetGoogleClientInfo("gccl", version.Repo)
return client, nil
}
type loggerInterface interface {
LogSync(ctx context.Context, e logging.Entry) error
Close() error
}
type logger struct {
*logging.Logger
c *logging.Client
}
func (l logger) Close() error {
return l.c.Close()
}
var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
lc, err := logging.NewClient(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
l := lc.Logger("errorreports")
return logger{l, lc}, nil
}
type sender interface {
send(ctx context.Context, r *http.Request, message string)
close() error
}
// errorApiSender sends error reports using the Stackdriver Error Reporting API.
type errorApiSender struct {
apiClient apiInterface
projectID string
serviceContext erpb.ServiceContext
}
// loggingSender sends error reports using the Stackdriver Logging API.
type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
}
// Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
// panic. This behavior can be overridden for an individual call to Catch using
// the Repanic option.
RepanicDefault bool
}
// NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime
// of the process.
//
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
if useLogging {
l, err := newLoggerInterface(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
sender := &loggingSender{
logger: l,
projectID: projectID,
serviceContext: map[string]string{
"service": serviceName,
},
}
if serviceVersion != "" {
sender.serviceContext["version"] = serviceVersion
}
c := &Client{
sender: sender,
RepanicDefault: true,
}
return c, nil
} else {
a, err := newApiInterface(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("creating Error Reporting client: %v", err)
}
c := &Client{
sender: &errorApiSender{
apiClient: a,
projectID: "projects/" + projectID,
serviceContext: erpb.ServiceContext{
Service: serviceName,
Version: serviceVersion,
},
},
RepanicDefault: true,
}
return c, nil
}
}
// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
err := c.sender.close()
c.sender = nil
return err
}
// An Option is an optional argument to Catch.
type Option interface {
isOption()
}
// PanicFlag returns an Option that can inform Catch that a panic has occurred.
// If *p is true when Catch is called, an error report is made even if recover
// returns nil. This allows Catch to report an error for panic(nil).
// If p is nil, the option is ignored.
//
// Here is an example of how to use PanicFlag:
//
// func foo(ctx context.Context, ...) {
// hasPanicked := true
// defer errorsClient.Catch(ctx, errorreporting.PanicFlag(&hasPanicked))
// ...
// ...
// // We have reached the end of the function, so we're not panicking.
// hasPanicked = false
// }
func PanicFlag(p *bool) Option { return panicFlag{p} }
type panicFlag struct {
*bool
}
func (h panicFlag) isOption() {}
// Repanic returns an Option that determines whether Catch will re-panic after
// it reports an error. This overrides the default in the client.
func Repanic(r bool) Option { return repanic(r) }
type repanic bool
func (r repanic) isOption() {}
// WithRequest returns an Option that informs Catch or Report of an http.Request
// that is being handled. Information from the Request is included in the error
// report, if one is made.
func WithRequest(r *http.Request) Option { return withRequest{r} }
type withRequest struct {
*http.Request
}
func (w withRequest) isOption() {}
// WithMessage returns an Option that sets a message to be included in the error
// report, if one is made. v is converted to a string with fmt.Sprint.
func WithMessage(v ...interface{}) Option { return message(v) }
type message []interface{}
func (m message) isOption() {}
// WithMessagef returns an Option that sets a message to be included in the error
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }
type messagef struct {
format string
v []interface{}
}
func (m messagef) isOption() {}
// Catch tries to recover a panic; if it succeeds, it writes an error report.
// It should be called by deferring it, like any other function for recovering
// panics.
//
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Catch(ctx context.Context, opt ...Option) {
panicked := false
for _, o := range opt {
switch o := o.(type) {
case panicFlag:
panicked = panicked || o.bool != nil && *o.bool
}
}
x := recover()
if x == nil && !panicked {
return
}
var (
r *http.Request
shouldRepanic = true
messages = []string{fmt.Sprint(x)}
)
if c != nil {
shouldRepanic = c.RepanicDefault
}
for _, o := range opt {
switch o := o.(type) {
case repanic:
shouldRepanic = bool(o)
case withRequest:
r = o.Request
case message:
messages = append(messages, fmt.Sprint(o...))
case messagef:
messages = append(messages, fmt.Sprintf(o.format, o.v...))
}
}
c.logInternal(ctx, r, true, strings.Join(messages, " "))
if shouldRepanic {
panic(x)
}
}
// Report writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Report can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprint(v...))
}
// Reportf writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
}
func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
// limit the stack trace to 16k.
var buf [16384]byte
stack := buf[0:runtime.Stack(buf[:], false)]
message := msg + "\n" + chopStack(stack, isPanic)
if c == nil {
log.Println("Error report used nil client:", message)
return
}
c.send(ctx, r, message)
}
func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) {
payload := map[string]interface{}{
"eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano),
"message": message,
"serviceContext": s.serviceContext,
}
if r != nil {
payload["context"] = map[string]interface{}{
"httpRequest": map[string]interface{}{
"method": r.Method,
"url": r.Host + r.RequestURI,
"userAgent": r.UserAgent(),
"referrer": r.Referer(),
"remoteIp": r.RemoteAddr,
},
}
}
e := logging.Entry{
Severity: logging.Error,
Payload: payload,
}
err := s.logger.LogSync(ctx, e)
if err != nil {
log.Println("Error writing error report:", err, "report:", payload)
}
}
func (s *loggingSender) close() error {
return s.logger.Close()
}
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
time := time.Now()
var errorContext *erpb.ErrorContext
if r != nil {
errorContext = &erpb.ErrorContext{
HttpRequest: &erpb.HttpRequestContext{
Method: r.Method,
Url: r.Host + r.RequestURI,
UserAgent: r.UserAgent(),
Referrer: r.Referer(),
RemoteIp: r.RemoteAddr,
},
}
}
req := erpb.ReportErrorEventRequest{
ProjectName: s.projectID,
Event: &erpb.ReportedErrorEvent{
EventTime: &timestamp.Timestamp{
Seconds: time.Unix(),
Nanos: int32(time.Nanosecond()),
},
ServiceContext: &s.serviceContext,
Message: message,
Context: errorContext,
},
}
_, err := s.apiClient.ReportErrorEvent(ctx, &req)
if err != nil {
log.Println("Error writing error report:", err, "report:", message)
}
}
func (s *errorApiSender) close() error {
return s.apiClient.Close()
}
// chopStack trims a stack trace so that the function which panics or calls
// Report is first.
func chopStack(s []byte, isPanic bool) string {
var f []byte
if isPanic {
f = []byte("panic(")
} else {
f = []byte("cloud.google.com/go/errorreporting.(*Client).Report")
}
lfFirst := bytes.IndexByte(s, '\n')
if lfFirst == -1 {
return string(s)
}
stack := s[lfFirst:]
panicLine := bytes.Index(stack, f)
if panicLine == -1 {
return string(s)
}
stack = stack[panicLine+1:]
for i := 0; i < 2; i++ {
nextLine := bytes.IndexByte(stack, '\n')
if nextLine == -1 {
return string(s)
}
stack = stack[nextLine+1:]
}
return string(s[:lfFirst+1]) + string(stack)
}
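Putting the pieces of this file together, a minimal usage sketch might look like the following; the project ID, service name, and version are placeholders rather than values taken from this commit:

package main

import (
	"log"

	"cloud.google.com/go/errorreporting"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	// Placeholder identifiers; useLogging=true also mirrors reports to Stackdriver Logging.
	client, err := errorreporting.NewClient(ctx, "example-project", "myservice", "v1.0", true)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// Catch recovers a panic, writes an error report, and re-panics by default.
	defer client.Catch(ctx, errorreporting.WithMessage("while running the example"))
	// Report an error directly; the *http.Request argument may be nil.
	client.Reportf(ctx, nil, "unexpected error: %v", "example condition")
}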


@@ -0,0 +1,212 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import (
"bytes"
"errors"
"log"
"strings"
"testing"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
const testProjectID = "testproject"
type fakeReportErrorsClient struct {
req *erpb.ReportErrorEventRequest
fail bool
}
func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
if c.fail {
return nil, errors.New("request failed")
}
c.req = req
return &erpb.ReportErrorEventResponse{}, nil
}
func (c *fakeReportErrorsClient) Close() error {
return nil
}
func newTestClient(c *fakeReportErrorsClient) *Client {
newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}
var ctx context.Context
func init() {
ctx = context.Background()
}
func TestCatchNothing(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) {
if req.Event.ServiceContext.Service != "myservice" {
t.Errorf("error report didn't contain service name")
}
if req.Event.ServiceContext.Version != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(req.Event.Message, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(req.Event.Message, panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchPanic")
if !strings.Contains(r.Event.Message, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClient(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReports(t *testing.T) {
fc := &fakeReportErrorsClient{fail: true}
c := newTestClient(fc)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchNilPanic")
if !strings.Contains(r.Event.Message, "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReport(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
c.Report(ctx, nil, "hello, ", "error")
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestReport")
}
func TestReportf(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestReportf")
if !strings.Contains(r.Event.Message, "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}

vendor/cloud.google.com/go/errorreporting/stack_test.go generated vendored Normal file

@@ -0,0 +1,118 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import "testing"
func TestChopStack(t *testing.T) {
for _, test := range []struct {
name string
in []byte
expected string
isPanic bool
}{
{
name: "Catch",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
panic()
/gopath/src/runtime/panic.go:458 +0x243
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "function not found",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "Report",
in: []byte(` goroutine 39 [running]:
runtime/debug.Stack()
/gopath/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Report()
/gopath/cloud.google.com/go/errorreporting/errors.go:248 +0x4ed
cloud.google.com/go/errorreporting.TestReport()
/gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`),
expected: ` goroutine 39 [running]:
cloud.google.com/go/errorreporting.TestReport()
/gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`,
isPanic: false,
},
} {
out := chopStack(test.in, test.isPanic)
if out != test.expected {
t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected)
}
}
}


@@ -204,3 +204,12 @@ func TestReportfUsingLogging(t *testing.T) {
t.Errorf("error report didn't contain formatted message")
}
}
func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}


@@ -18,14 +18,7 @@
//
// See https://cloud.google.com/error-reporting/ for more information.
//
- // To initialize a client, use the NewClient function. Generally you will want
- // to do this on program initialization. The NewClient function takes as
- // arguments a context, the project name, a service name, and a version string.
- // The service name and version string identify the running program, and are
- // included in error reports. The version string can be left empty. NewClient
- // also takes a bool that indicates whether to report errors using Stackdriver
- // Logging, which will result in errors appearing in both the logs and the error
- // dashboard. This is useful if you are already a user of Stackdriver Logging.
+ // To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errors"
// ...
@@ -76,6 +69,8 @@
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
+ //
+ // Deprecated: Use cloud.google.com/go/errorreporting instead.
package errors // import "cloud.google.com/go/errors"
import (
@@ -155,9 +150,9 @@ type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
- client *logging.Client
}
+ // Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
@@ -166,6 +161,16 @@ type Client struct {
RepanicDefault bool
}
+ // NewClient returns a new error reporting client. Generally you will want
+ // to create a client on program initialization and use it through the lifetime
+ // of the process.
+ //
+ // The service name and version string identify the running program, and are
+ // included in error reports. The version string can be left empty.
+ //
+ // Set useLogging to report errors also using Stackdriver Logging,
+ // which will result in errors appearing in both the logs and the error
+ // dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
if useLogging {
l, err := newLoggerInterface(ctx, projectID, opts...)
@@ -383,7 +388,7 @@ func (s *loggingSender) send(ctx context.Context, r *http.Request, message strin
}
func (s *loggingSender) close() error {
- return s.client.Close()
+ return s.logger.Close()
}
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {


@@ -34,8 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
- // DefaultAuthScopes reports the authentication scopes required
- // by this package.
+ // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",


@@ -32,12 +32,6 @@ import (
"google.golang.org/grpc/codes"
)
- var (
- iamProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
- iamServiceAccountPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}")
- iamKeyPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}/keys/{key}")
- )
// IamCallOptions contains the retry settings for each method of IamClient.
type IamCallOptions struct {
ListServiceAccounts []gax.CallOption
@@ -119,16 +113,16 @@ type IamClient struct {
// of to an individual end user. It is used to authenticate calls
// to a Google API.
//
- // To create a service account, specify the `project_id` and `account_id`
- // for the account. The `account_id` is unique within the project, and used
+ // To create a service account, specify the project_id and account_id
+ // for the account. The account_id is unique within the project, and used
// to generate the service account email address and a stable
- // `unique_id`.
+ // unique_id.
//
// All other methods can identify accounts using the format
- // `projects/{project}/serviceAccounts/{account}`.
- // Using `-` as a wildcard for the project will infer the project from
- // the account. The `account` value can be the `email` address or the
- // `unique_id` of the service account.
+ // projects/{project}/serviceAccounts/{account}.
+ // Using - as a wildcard for the project will infer the project from
+ // the account. The account value can be the email address or the
+ // unique_id of the service account.
func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultIamClientOptions(), opts...)...)
if err != nil {
@@ -140,7 +134,7 @@ func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient,
iamClient: adminpb.NewIAMClient(conn),
}
- c.SetGoogleClientInfo()
+ c.setGoogleClientInfo()
return c, nil
}
@@ -155,10 +149,10 @@ func (c *IamClient) Close() error {
return c.conn.Close()
}
- // SetGoogleClientInfo sets the name and version of the application in
+ // setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
- func (c *IamClient) SetGoogleClientInfo(keyval ...string) {
+ func (c *IamClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@@ -166,38 +160,32 @@ func (c *IamClient) SetGoogleClientInfo(keyval ...string) {
// IamProjectPath returns the path for the project resource.
func IamProjectPath(project string) string {
- path, err := iamProjectPathTemplate.Render(map[string]string{
- "project": project,
- })
- if err != nil {
- panic(err)
- }
- return path
+ return "" +
+ "projects/" +
+ project +
+ ""
}
// IamServiceAccountPath returns the path for the service account resource.
func IamServiceAccountPath(project, serviceAccount string) string {
- path, err := iamServiceAccountPathTemplate.Render(map[string]string{
- "project": project,
- "service_account": serviceAccount,
- })
- if err != nil {
- panic(err)
- }
- return path
+ return "" +
+ "projects/" +
+ project +
+ "/serviceAccounts/" +
+ serviceAccount +
+ ""
}
// IamKeyPath returns the path for the key resource.
func IamKeyPath(project, serviceAccount, key string) string {
- path, err := iamKeyPathTemplate.Render(map[string]string{
- "project": project,
- "service_account": serviceAccount,
- "key": key,
- })
- if err != nil {
- panic(err)
- }
- return path
+ return "" +
+ "projects/" +
+ project +
+ "/serviceAccounts/" +
+ serviceAccount +
+ "/keys/" +
+ key +
+ ""
}
// ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project.
@@ -271,8 +259,8 @@ func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.Creat
// UpdateServiceAccount updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
//
// Currently, only the following fields are updatable:
- // `display_name` .
- // The `etag` is mandatory.
+ // display_name .
+ // The etag is mandatory.
func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateServiceAccount[0:len(c.CallOptions.UpdateServiceAccount):len(c.CallOptions.UpdateServiceAccount)], opts...)
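The IAM admin helpers get the same template-to-concatenation treatment; a small illustration, assuming the vendored import path cloud.google.com/go/iam/admin/apiv1 and made-up resource IDs:

package main

import (
	"fmt"

	admin "cloud.google.com/go/iam/admin/apiv1"
)

func main() {
	// Prints "projects/example-project/serviceAccounts/sa@example-project.iam.gserviceaccount.com".
	fmt.Println(admin.IamServiceAccountPath("example-project", "sa@example-project.iam.gserviceaccount.com"))
	// Prints "projects/example-project/serviceAccounts/sa@example-project.iam.gserviceaccount.com/keys/my-key".
	fmt.Println(admin.IamKeyPath("example-project", "sa@example-project.iam.gserviceaccount.com", "my-key"))
}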


@@ -38,7 +38,7 @@ go get -v ./...
# cd $GOCLOUD_HOME
# Run tests and tee output to log file, to be pushed to GCS as artifact.
- go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_REVISION.log
+ go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_CHANGE_NUMBER.txt
# Make sure README.md is up to date.
make -C internal/readme test diff


@@ -20,6 +20,7 @@ package optional
import (
"fmt"
"strings"
+ "time"
)
type (
@@ -37,6 +38,9 @@ type (
// Float64 is either a float64 or nil.
Float64 interface{}
+ // Duration is either a time.Duration or nil.
+ Duration interface{}
)
// ToBool returns its argument as a bool.
@@ -89,6 +93,16 @@ func ToFloat64(v Float64) float64 {
return x
}
+ // ToDuration returns its argument as a time.Duration.
+ // It panics if its argument is nil or not a time.Duration.
+ func ToDuration(v Duration) time.Duration {
+ x, ok := v.(time.Duration)
+ if !ok {
+ doPanic("Duration", v)
+ }
+ return x
+ }
func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}
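The new optional.Duration follows the same contract as the existing optional types: nil means unset, anything else must be a time.Duration. A rough sketch of consuming it (the helper name is made up, and since the optional package is internal this only compiles inside the cloud.google.com/go module):

package main

import (
	"fmt"
	"time"

	"cloud.google.com/go/internal/optional"
)

// timeoutOrDefault is a hypothetical helper: it returns the optional value
// when set, or def when the optional is nil.
func timeoutOrDefault(d optional.Duration, def time.Duration) time.Duration {
	if d == nil {
		return def
	}
	// ToDuration panics if d is non-nil but not a time.Duration.
	return optional.ToDuration(d)
}

func main() {
	fmt.Println(timeoutOrDefault(nil, 30*time.Second))           // 30s
	fmt.Println(timeoutOrDefault(5*time.Second, 30*time.Second)) // 5s
}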


@@ -0,0 +1,71 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package language
import (
languagepb "google.golang.org/genproto/googleapis/cloud/language/v1"
)
import (
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
func TestLanguageServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var content string = "Hello, world!"
var type_ languagepb.Document_Type = languagepb.Document_PLAIN_TEXT
var document = &languagepb.Document{
Source: &languagepb.Document_Content{
Content: content,
},
Type: type_,
}
var request = &languagepb.AnalyzeSentimentRequest{
Document: document,
}
if _, err := c.AnalyzeSentiment(ctx, request); err != nil {
t.Error(err)
}
}
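The smoke test above only asserts that AnalyzeSentiment succeeds; a caller would normally inspect the returned sentiment. A hedged sketch, assuming the v1 languagepb message shapes vendored alongside and an illustrative helper name:

package main

import (
	"fmt"

	language "cloud.google.com/go/language/apiv1"
	"golang.org/x/net/context"
	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1"
)

// analyzeSentiment is a hypothetical helper showing how the response of the
// call exercised by the smoke test would normally be consumed.
func analyzeSentiment(ctx context.Context, c *language.Client, text string) error {
	resp, err := c.AnalyzeSentiment(ctx, &languagepb.AnalyzeSentimentRequest{
		Document: &languagepb.Document{
			Source: &languagepb.Document_Content{Content: text},
			Type:   languagepb.Document_PLAIN_TEXT,
		},
	})
	if err != nil {
		return err
	}
	// DocumentSentiment.Score is in [-1, 1]; Magnitude reflects overall strength.
	if s := resp.DocumentSentiment; s != nil {
		fmt.Printf("score=%v magnitude=%v\n", s.Score, s.Magnitude)
	}
	return nil
}

func main() {
	ctx := context.Background()
	// With no options, NewClient relies on Application Default Credentials.
	c, err := language.NewClient(ctx)
	if err != nil {
		panic(err)
	}
	defer c.Close()
	if err := analyzeSentiment(ctx, c, "Hello, world!"); err != nil {
		panic(err)
	}
}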


@@ -34,8 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
- // DefaultAuthScopes reports the authentication scopes required
- // by this package.
+ // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",


@@ -97,7 +97,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
client: languagepb.NewLanguageServiceClient(conn),
}
- c.SetGoogleClientInfo()
+ c.setGoogleClientInfo()
return c, nil
}
@@ -112,10 +112,10 @@ func (c *Client) Close() error {
return c.conn.Close()
}
- // SetGoogleClientInfo sets the name and version of the application in
+ // setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
- func (c *Client) SetGoogleClientInfo(keyval ...string) {
+ func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}


@@ -208,10 +208,8 @@ func TestLanguageServiceAnalyzeEntities(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitiesRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -239,10 +237,8 @@ func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitiesRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -271,10 +267,8 @@ func TestLanguageServiceAnalyzeSyntax(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeSyntaxRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -302,10 +296,8 @@ func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeSyntaxRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -335,11 +327,9 @@ func TestLanguageServiceAnnotateText(t *testing.T) {
var document *languagepb.Document = &languagepb.Document{}
var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnnotateTextRequest{
Document: document,
Features: features,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -368,11 +358,9 @@ func TestLanguageServiceAnnotateTextError(t *testing.T) {
var document *languagepb.Document = &languagepb.Document{}
var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnnotateTextRequest{
Document: document,
Features: features,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)


@@ -0,0 +1,71 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package language
import (
languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2"
)
import (
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
func TestLanguageServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var content string = "Hello, world!"
var type_ languagepb.Document_Type = languagepb.Document_PLAIN_TEXT
var document = &languagepb.Document{
Source: &languagepb.Document_Content{
Content: content,
},
Type: type_,
}
var request = &languagepb.AnalyzeSentimentRequest{
Document: document,
}
if _, err := c.AnalyzeSentiment(ctx, request); err != nil {
t.Error(err)
}
}


@@ -34,8 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
- // DefaultAuthScopes reports the authentication scopes required
- // by this package.
+ // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",


@@ -99,7 +99,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
client: languagepb.NewLanguageServiceClient(conn),
}
- c.SetGoogleClientInfo()
+ c.setGoogleClientInfo()
return c, nil
}
@@ -114,10 +114,10 @@ func (c *Client) Close() error {
return c.conn.Close()
}
- // SetGoogleClientInfo sets the name and version of the application in
+ // setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
- func (c *Client) SetGoogleClientInfo(keyval ...string) {
+ func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}


@@ -220,10 +220,8 @@ func TestLanguageServiceAnalyzeEntities(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitiesRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -251,10 +249,8 @@ func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitiesRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -283,10 +279,8 @@ func TestLanguageServiceAnalyzeEntitySentiment(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitySentimentRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -314,10 +308,8 @@ func TestLanguageServiceAnalyzeEntitySentimentError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitySentimentRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -346,10 +338,8 @@ func TestLanguageServiceAnalyzeSyntax(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeSyntaxRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -377,10 +367,8 @@ func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeSyntaxRequest{
Document: document,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -410,11 +398,9 @@ func TestLanguageServiceAnnotateText(t *testing.T) {
var document *languagepb.Document = &languagepb.Document{}
var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnnotateTextRequest{
Document: document,
Features: features,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)
@@ -443,11 +429,9 @@ func TestLanguageServiceAnnotateTextError(t *testing.T) {
var document *languagepb.Document = &languagepb.Document{}
var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
- var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnnotateTextRequest{
Document: document,
Features: features,
- EncodingType: encodingType,
}
c, err := NewClient(context.Background(), clientOpt)


@ -31,11 +31,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
configProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
configSinkPathTemplate = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}")
)
// ConfigCallOptions contains the retry settings for each method of ConfigClient. // ConfigCallOptions contains the retry settings for each method of ConfigClient.
type ConfigCallOptions struct { type ConfigCallOptions struct {
ListSinks []gax.CallOption ListSinks []gax.CallOption
@ -133,25 +128,20 @@ func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) {
 // ConfigProjectPath returns the path for the project resource.
 func ConfigProjectPath(project string) string {
-	path, err := configProjectPathTemplate.Render(map[string]string{
-		"project": project,
-	})
-	if err != nil {
-		panic(err)
-	}
-	return path
+	return "" +
+		"projects/" +
+		project +
+		""
 }

 // ConfigSinkPath returns the path for the sink resource.
 func ConfigSinkPath(project, sink string) string {
-	path, err := configSinkPathTemplate.Render(map[string]string{
-		"project": project,
-		"sink": sink,
-	})
-	if err != nil {
-		panic(err)
-	}
-	return path
+	return "" +
+		"projects/" +
+		project +
+		"/sinks/" +
+		sink +
+		""
 }
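As a minimal usage sketch (the project and sink IDs below are made up, and fmt is assumed to be imported), the rewritten helpers from this package now build the resource name by plain string concatenation:

	// Sketch only: illustrative IDs, not part of this change.
	fmt.Println(ConfigProjectPath("my-project"))
	// prints: projects/my-project
	fmt.Println(ConfigSinkPath("my-project", "my-sink"))
	// prints: projects/my-project/sinks/my-sink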
// ListSinks lists sinks. // ListSinks lists sinks.
@ -208,7 +198,7 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques
// CreateSink creates a sink that exports specified log entries to a destination. The // CreateSink creates a sink that exports specified log entries to a destination. The
// export of newly-ingested log entries begins immediately, unless the current // export of newly-ingested log entries begins immediately, unless the current
// time is outside the sink's start and end times or the sink's // time is outside the sink's start and end times or the sink's
// `writer_identity` is not permitted to write to the destination. A sink can // writer_identity is not permitted to write to the destination. A sink can
// export log entries only from the resource owning the sink. // export log entries only from the resource owning the sink.
func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
@ -227,12 +217,12 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink
// UpdateSink updates a sink. If the named sink doesn't exist, then this method is // UpdateSink updates a sink. If the named sink doesn't exist, then this method is
// identical to // identical to
// [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create). // sinks.create (at /logging/docs/api/reference/rest/v2/projects.sinks/create).
// If the named sink does exist, then this method replaces the following // If the named sink does exist, then this method replaces the following
// fields in the existing sink with values from the new sink: `destination`, // fields in the existing sink with values from the new sink: destination,
// `filter`, `output_version_format`, `start_time`, and `end_time`. // filter, output_version_format, start_time, and end_time.
// The updated filter might also have a new `writer_identity`; see the // The updated filter might also have a new writer_identity; see the
// `unique_writer_identity` field. // unique_writer_identity field.
func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...) opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...)
@ -248,7 +238,7 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink
return resp, nil return resp, nil
} }
// DeleteSink deletes a sink. If the sink has a unique `writer_identity`, then that // DeleteSink deletes a sink. If the sink has a unique writer_identity, then that
// service account is also deleted. // service account is also deleted.
func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)


@ -35,8 +35,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md) return metadata.NewOutgoingContext(ctx, md)
} }
-// DefaultAuthScopes reports the authentication scopes required
-// by this package.
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string { func DefaultAuthScopes() []string {
return []string{ return []string{
"https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform",


@ -32,11 +32,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
loggingProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
loggingLogPathTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}")
)
// CallOptions contains the retry settings for each method of Client. // CallOptions contains the retry settings for each method of Client.
type CallOptions struct { type CallOptions struct {
DeleteLog []gax.CallOption DeleteLog []gax.CallOption
@ -146,25 +141,20 @@ func (c *Client) SetGoogleClientInfo(keyval ...string) {
// ProjectPath returns the path for the project resource. // ProjectPath returns the path for the project resource.
func ProjectPath(project string) string { func ProjectPath(project string) string {
path, err := loggingProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// LogPath returns the path for the log resource. // LogPath returns the path for the log resource.
func LogPath(project, log string) string { func LogPath(project, log string) string {
path, err := loggingLogPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"log": log, project +
}) "/logs/" +
if err != nil { log +
panic(err) ""
}
return path
} }
// DeleteLog deletes all the log entries in a log. // DeleteLog deletes all the log entries in a log.
@ -200,7 +190,7 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt
// ListLogEntries lists log entries. Use this method to retrieve log entries from // ListLogEntries lists log entries. Use this method to retrieve log entries from
// Stackdriver Logging. For ways to export log entries, see // Stackdriver Logging. For ways to export log entries, see
// [Exporting Logs](/logging/docs/export). // Exporting Logs (at /logging/docs/export).
func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator { func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...) opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)


@ -31,11 +31,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
metricsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}")
)
// MetricsCallOptions contains the retry settings for each method of MetricsClient. // MetricsCallOptions contains the retry settings for each method of MetricsClient.
type MetricsCallOptions struct { type MetricsCallOptions struct {
ListLogMetrics []gax.CallOption ListLogMetrics []gax.CallOption
@ -132,25 +127,20 @@ func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) {
// MetricsProjectPath returns the path for the project resource. // MetricsProjectPath returns the path for the project resource.
func MetricsProjectPath(project string) string { func MetricsProjectPath(project string) string {
path, err := metricsProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// MetricsMetricPath returns the path for the metric resource. // MetricsMetricPath returns the path for the metric resource.
func MetricsMetricPath(project, metric string) string { func MetricsMetricPath(project, metric string) string {
path, err := metricsMetricPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"metric": metric, project +
}) "/metrics/" +
if err != nil { metric +
panic(err) ""
}
return path
} }
// ListLogMetrics lists logs-based metrics. // ListLogMetrics lists logs-based metrics.


@ -46,7 +46,9 @@ import (
logtypepb "google.golang.org/genproto/googleapis/logging/type" logtypepb "google.golang.org/genproto/googleapis/logging/type"
logpb "google.golang.org/genproto/googleapis/logging/v2" logpb "google.golang.org/genproto/googleapis/logging/v2"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
// Import the following so EntryIterator can unmarshal log protos. // Import the following so EntryIterator can unmarshal log protos.
_ "google.golang.org/genproto/googleapis/appengine/logging/v1"
_ "google.golang.org/genproto/googleapis/cloud/audit" _ "google.golang.org/genproto/googleapis/cloud/audit"
) )


@ -87,8 +87,7 @@ func (c *Client) Sink(ctx context.Context, sinkID string) (*Sink, error) {
 	return fromLogSink(ls), nil
 }

-// UpdateSink updates an existing Sink, or creates a new one if the Sink doesn't exist.
-// Requires AdminScope.
+// UpdateSink updates an existing Sink. Requires AdminScope.
 func (c *Client) UpdateSink(ctx context.Context, sink *Sink) (*Sink, error) {
 	ls, err := c.sClient.UpdateSink(ctx, &logpb.UpdateSinkRequest{
 		SinkName: c.sinkPath(sink.ID),


@ -152,7 +152,9 @@ func TestUpdateSink(t *testing.T) {
 		Filter: testFilter,
 	}
-	// Updating a non-existent sink creates a new one.
+	if _, err := client.CreateSink(ctx, sink); err != nil {
+		t.Fatal(err)
+	}
 	got, err := client.UpdateSink(ctx, sink)
 	if err != nil {
 		t.Fatal(err)
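A rough sketch of the behavioural change for callers (assuming an already-constructed admin client and a populated *Sink, as in the test above): UpdateSink no longer creates a missing sink, so an explicit CreateSink must come first.

	// Sketch under the assumptions above; error handling is abbreviated.
	if _, err := client.CreateSink(ctx, sink); err != nil {
		// handle error; the sink must exist before UpdateSink is called
	}
	if _, err := client.UpdateSink(ctx, sink); err != nil {
		// handle error
	}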


@ -86,7 +86,11 @@ var now = time.Now
 // ErrOverflow signals that the number of buffered entries for a Logger
 // exceeds its BufferLimit.
-var ErrOverflow = errors.New("logging: log entry overflowed buffer limits")
+var ErrOverflow = bundler.ErrOverflow
+
+// ErrOversizedEntry signals that an entry's size exceeds the maximum number of
+// bytes that will be sent in a single call to the logging service.
+var ErrOversizedEntry = bundler.ErrOversizedItem

 // Client is a Logging client. A Client is associated with a single Cloud project.
 type Client struct {
@ -331,10 +335,10 @@ type entryByteThreshold int

 func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) }

 // EntryByteLimit is the maximum number of bytes of entries that will be sent
-// in a single call to the logging service. This option limits the size of a
-// single RPC payload, to account for network or service issues with large
-// RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has
-// no effect.
+// in a single call to the logging service. ErrOversizedEntry is returned if an
+// entry exceeds EntryByteLimit. This option limits the size of a single RPC
+// payload, to account for network or service issues with large RPCs. If
+// EntryByteLimit is smaller than EntryByteThreshold, the latter has no effect.
 // The default is zero, meaning there is no limit.
 func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) }
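A hedged sketch of how a caller might observe the new sentinel, assuming background delivery errors are surfaced through the client's OnError hook and that the logger name, byte limit, payload variable, and log import below are made up for illustration:

	// Sketch only: client is an already-constructed *logging.Client.
	client.OnError = func(err error) {
		if err == logging.ErrOversizedEntry {
			log.Println("dropped an entry larger than EntryByteLimit")
		}
	}
	lg := client.Logger("my-log", logging.EntryByteLimit(1<<20))
	lg.Log(logging.Entry{Payload: veryLargePayload}) // delivered asynchronously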


@ -33,8 +33,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md) return metadata.NewOutgoingContext(ctx, md)
} }
-// DefaultAuthScopes reports the authentication scopes required
-// by this package.
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string { func DefaultAuthScopes() []string {
return []string{} return []string{}
} }


@ -93,7 +93,7 @@ type OperationsClient struct {
// interface to receive the real response asynchronously by polling the // interface to receive the real response asynchronously by polling the
// operation resource, or pass the operation resource to another API (such as // operation resource, or pass the operation resource to another API (such as
// Google Cloud Pub/Sub API) to receive the response. Any API service that // Google Cloud Pub/Sub API) to receive the response. Any API service that
// returns long-running operations should implement the `Operations` interface // returns long-running operations should implement the Operations interface
// so developers can have a consistent client experience. // so developers can have a consistent client experience.
func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultOperationsClientOptions(), opts...)...) conn, err := transport.DialGRPC(ctx, append(defaultOperationsClientOptions(), opts...)...)
@ -149,10 +149,10 @@ func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.
} }
 // ListOperations lists operations that match the specified filter in the request. If the
-// server doesn't support this method, it returns `UNIMPLEMENTED`.
+// server doesn't support this method, it returns UNIMPLEMENTED.
 //
-// NOTE: the `name` binding below allows API services to override the binding
-// to use different resource name schemes, such as `users/*/operations`.
+// NOTE: the name binding below allows API services to override the binding
+// to use different resource name schemes, such as users/*/operations.
 func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
 	ctx = insertXGoog(ctx, c.xGoogHeader)
 	opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
@ -190,13 +190,13 @@ func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningp
 // CancelOperation starts asynchronous cancellation on a long-running operation. The server
 // makes a best effort to cancel the operation, but success is not
 // guaranteed. If the server doesn't support this method, it returns
-// `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+// google.rpc.Code.UNIMPLEMENTED. Clients can use
 // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
 // other methods to check whether the cancellation succeeded or whether the
 // operation completed despite cancellation. On successful cancellation,
 // the operation is not deleted; instead, it becomes an operation with
 // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
-// corresponding to `Code.CANCELLED`.
+// corresponding to Code.CANCELLED.
 func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
 	ctx = insertXGoog(ctx, c.xGoogHeader)
 	opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...)
@ -211,7 +211,7 @@ func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunning
 // DeleteOperation deletes a long-running operation. This method indicates that the client is
 // no longer interested in the operation result. It does not cancel the
 // operation. If the server doesn't support this method, it returns
-// `google.rpc.Code.UNIMPLEMENTED`.
+// google.rpc.Code.UNIMPLEMENTED.
 func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
 	ctx = insertXGoog(ctx, c.xGoogHeader)
 	opts = append(c.CallOptions.DeleteOperation[0:len(c.CallOptions.DeleteOperation):len(c.CallOptions.DeleteOperation)], opts...)
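As a rough sketch of the documented cancel-then-poll flow (the operation name below is made up; the request types come from the longrunningpb package already imported in this file):

	// Sketch only: c is an *OperationsClient.
	name := "operations/12345" // illustrative operation name
	if err := c.CancelOperation(ctx, &longrunningpb.CancelOperationRequest{Name: name}); err != nil {
		// handle error, e.g. an Unimplemented status if the server lacks support
	}
	if op, err := c.GetOperation(ctx, &longrunningpb.GetOperationRequest{Name: name}); err == nil && op.Done {
		// on successful cancellation op.GetError() carries code 1 (Code.CANCELLED)
	}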


@ -34,8 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md) return metadata.NewOutgoingContext(ctx, md)
} }
-// DefaultAuthScopes reports the authentication scopes required
-// by this package.
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string { func DefaultAuthScopes() []string {
return []string{ return []string{
"https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform",


@ -32,11 +32,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
groupProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
groupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}")
)
// GroupCallOptions contains the retry settings for each method of GroupClient. // GroupCallOptions contains the retry settings for each method of GroupClient.
type GroupCallOptions struct { type GroupCallOptions struct {
ListGroups []gax.CallOption ListGroups []gax.CallOption
@ -97,7 +92,7 @@ type GroupClient struct {
// NewGroupClient creates a new group service client. // NewGroupClient creates a new group service client.
// //
// The Group API lets you inspect and manage your // The Group API lets you inspect and manage your
// [groups](google.monitoring.v3.Group). // groups (at google.monitoring.v3.Group).
// //
// A group is a named filter that is used to identify // A group is a named filter that is used to identify
// a collection of monitored resources. Groups are typically used to // a collection of monitored resources. Groups are typically used to
@ -119,7 +114,7 @@ func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupCli
groupClient: monitoringpb.NewGroupServiceClient(conn), groupClient: monitoringpb.NewGroupServiceClient(conn),
} }
c.SetGoogleClientInfo() c.setGoogleClientInfo()
return c, nil return c, nil
} }
@ -134,10 +129,10 @@ func (c *GroupClient) Close() error {
return c.conn.Close() return c.conn.Close()
} }
// SetGoogleClientInfo sets the name and version of the application in // setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for // the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients. // use by Google-written clients.
func (c *GroupClient) SetGoogleClientInfo(keyval ...string) { func (c *GroupClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -145,25 +140,20 @@ func (c *GroupClient) SetGoogleClientInfo(keyval ...string) {
// GroupProjectPath returns the path for the project resource. // GroupProjectPath returns the path for the project resource.
func GroupProjectPath(project string) string { func GroupProjectPath(project string) string {
path, err := groupProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// GroupGroupPath returns the path for the group resource. // GroupGroupPath returns the path for the group resource.
func GroupGroupPath(project, group string) string { func GroupGroupPath(project, group string) string {
path, err := groupGroupPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"group": group, project +
}) "/groups/" +
if err != nil { group +
panic(err) ""
}
return path
} }
// ListGroups lists the existing groups. // ListGroups lists the existing groups.
@ -234,7 +224,7 @@ func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateG
} }
// UpdateGroup updates an existing group. // UpdateGroup updates an existing group.
// You can change any group attributes except `name`. // You can change any group attributes except name.
func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)


@ -33,12 +33,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
metricProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
metricMetricDescriptorPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metricDescriptors/{metric_descriptor=**}")
metricMonitoredResourceDescriptorPathTemplate = gax.MustCompilePathTemplate("projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}")
)
// MetricCallOptions contains the retry settings for each method of MetricClient. // MetricCallOptions contains the retry settings for each method of MetricClient.
type MetricCallOptions struct { type MetricCallOptions struct {
ListMonitoredResourceDescriptors []gax.CallOption ListMonitoredResourceDescriptors []gax.CallOption
@ -115,7 +109,7 @@ func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricC
metricClient: monitoringpb.NewMetricServiceClient(conn), metricClient: monitoringpb.NewMetricServiceClient(conn),
} }
c.SetGoogleClientInfo() c.setGoogleClientInfo()
return c, nil return c, nil
} }
@ -130,10 +124,10 @@ func (c *MetricClient) Close() error {
return c.conn.Close() return c.conn.Close()
} }
// SetGoogleClientInfo sets the name and version of the application in // setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for // the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients. // use by Google-written clients.
func (c *MetricClient) SetGoogleClientInfo(keyval ...string) { func (c *MetricClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -141,37 +135,30 @@ func (c *MetricClient) SetGoogleClientInfo(keyval ...string) {
// MetricProjectPath returns the path for the project resource. // MetricProjectPath returns the path for the project resource.
func MetricProjectPath(project string) string { func MetricProjectPath(project string) string {
path, err := metricProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// MetricMetricDescriptorPath returns the path for the metric descriptor resource. // MetricMetricDescriptorPath returns the path for the metric descriptor resource.
func MetricMetricDescriptorPath(project, metricDescriptor string) string { func MetricMetricDescriptorPath(project, metricDescriptor string) string {
path, err := metricMetricDescriptorPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"metric_descriptor": metricDescriptor, project +
}) "/metricDescriptors/" +
if err != nil { metricDescriptor +
panic(err) ""
}
return path
} }
// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. // MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource.
func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string {
path, err := metricMonitoredResourceDescriptorPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"monitored_resource_descriptor": monitoredResourceDescriptor, project +
}) "/monitoredResourceDescriptors/" +
if err != nil { monitoredResourceDescriptor +
panic(err) ""
}
return path
} }
// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. // ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
@ -278,7 +265,7 @@ func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringp
// CreateMetricDescriptor creates a new metric descriptor. // CreateMetricDescriptor creates a new metric descriptor.
// User-created metric descriptors define // User-created metric descriptors define
// [custom metrics](/monitoring/custom-metrics). // custom metrics (at /monitoring/custom-metrics).
func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...)
@ -295,7 +282,7 @@ func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitori
} }
// DeleteMetricDescriptor deletes a metric descriptor. Only user-created // DeleteMetricDescriptor deletes a metric descriptor. Only user-created
// [custom metrics](/monitoring/custom-metrics) can be deleted. // custom metrics (at /monitoring/custom-metrics) can be deleted.
func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...)


@ -16,29 +16,28 @@
// //
// This package is still experimental and subject to change. // This package is still experimental and subject to change.
// //
// Usage example:
//
// import "cloud.google.com/go/profiler"
// ...
// err := profiler.Start(profiler.Config{Service: "my-service"})
// if err != nil {
// // TODO: Handle error.
// }
//
// Calling Start will start a goroutine to collect profiles and // Calling Start will start a goroutine to collect profiles and
// upload to Cloud Profiler server, at the rhythm specified by // upload to Cloud Profiler server, at the rhythm specified by
// the server. // the server.
// //
// The caller should provide the target string in the config so Cloud // The caller must provide the service string in the config, and
// Profiler knows how to group the profile data. Otherwise the target // may provide other information as well. See Config for details.
// string is set to "unknown".
//
// Optionally DebugLogging can be set in the config to enable detailed
// logging from profiler.
//
// Start should only be called once. The first call will start
// the profiling goroutine. Any additional calls will be ignored.
package profiler package profiler
import ( import (
"bytes" "bytes"
"errors" "errors"
"fmt"
"log" "log"
"runtime/pprof" "runtime/pprof"
"sort"
"strings"
"sync" "sync"
"time" "time"
@ -48,7 +47,6 @@ import (
"github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes"
gax "github.com/googleapis/gax-go" gax "github.com/googleapis/gax-go"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/oauth2/google"
"google.golang.org/api/option" "google.golang.org/api/option"
gtransport "google.golang.org/api/transport/grpc" gtransport "google.golang.org/api/transport/grpc"
pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2"
@ -60,7 +58,7 @@ import (
) )
var ( var (
config = &Config{} config Config
startOnce sync.Once startOnce sync.Once
// getProjectID, getInstanceName, getZone, startCPUProfile, stopCPUProfile, // getProjectID, getInstanceName, getZone, startCPUProfile, stopCPUProfile,
// writeHeapProfile and sleep are overrideable for testing. // writeHeapProfile and sleep are overrideable for testing.
@ -74,12 +72,12 @@ var (
) )
const ( const (
apiAddress = "cloudprofiler.googleapis.com:443" apiAddress = "cloudprofiler.googleapis.com:443"
xGoogAPIMetadata = "x-goog-api-client" xGoogAPIMetadata = "x-goog-api-client"
deploymentKeyMetadata = "x-profiler-deployment-key-bin" zoneNameLabel = "zone"
zoneNameLabel = "zone" versionLabel = "version"
instanceLabel = "instance" instanceLabel = "instance"
scope = "https://www.googleapis.com/auth/monitoring.write" scope = "https://www.googleapis.com/auth/monitoring.write"
initialBackoff = time.Second initialBackoff = time.Second
// Ensure the agent will recover within 1 hour. // Ensure the agent will recover within 1 hour.
@ -90,10 +88,25 @@ const (
 // Config is the profiler configuration.
 type Config struct {
-	// Target groups related deployments together, defaults to "unknown".
-	Target string
+	// Service (or deprecated Target) must be provided to start the profiler.
+	// It specifies the name of the service under which the profiled data
+	// will be recorded and exposed at the Cloud Profiler UI for the project.
+	// You can specify an arbitrary string, but see Deployment.target at
+	// https://github.com/googleapis/googleapis/blob/master/google/devtools/cloudprofiler/v2/profiler.proto
+	// for restrictions.
+	// NOTE: The string should be the same across different replicas of
+	// your service so that the globally constant profiling rate is
+	// maintained. Do not put things like PID or unique pod ID in the name.
+	Service string

-	// DebugLogging enables detailed debug logging from profiler.
+	// ServiceVersion is an optional field specifying the version of the
+	// service. It can be an arbitrary string. Cloud Profiler profiles
+	// once per minute for each version of each service in each zone.
+	// ServiceVersion defaults to an empty string.
+	ServiceVersion string
+
+	// DebugLogging enables detailed debug logging from profiler. It
+	// defaults to false.
 	DebugLogging bool

 	// ProjectID is the Cloud Console project ID to use instead of
@ -121,35 +134,36 @@ type Config struct {
 	// agent API. Defaults to the production environment, overridable
 	// for testing.
 	APIAddr string
+
+	// Target is deprecated, use Service instead.
+	Target string
 }
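A minimal configuration sketch under the new fields (the service name and version are made up; the package example further down shows the canonical call):

	// Sketch only: Service replaces the deprecated Target field.
	cfg := profiler.Config{
		Service:        "frontend",
		ServiceVersion: "1.0.0",
	}
	if err := profiler.Start(cfg); err != nil {
		// handle error; Start now fails when neither Service nor Target is set
	}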
 // startError represents the error that occurred during the
 // initialization and startup of the agent.
 var startError error
// Start starts a goroutine to collect and upload profiles. // Start starts a goroutine to collect and upload profiles. The
// See package level documentation for details. // caller must provide the service string in the config. See
func Start(cfg *Config, options ...option.ClientOption) error { // Config for details. Start should only be called once. Any
// additional calls will be ignored.
func Start(cfg Config, options ...option.ClientOption) error {
startOnce.Do(func() { startOnce.Do(func() {
startError = start(cfg, options...) startError = start(cfg, options...)
}) })
return startError return startError
} }
func start(cfg *Config, options ...option.ClientOption) error { func start(cfg Config, options ...option.ClientOption) error {
initializeConfig(cfg) if err := initializeConfig(cfg); err != nil {
debugLog("failed to initialize config: %v", err)
ctx := context.Background()
ts, err := google.DefaultTokenSource(ctx, scope)
if err != nil {
debugLog("failed to get application default credentials: %v", err)
return err return err
} }
ctx := context.Background()
opts := []option.ClientOption{ opts := []option.ClientOption{
option.WithEndpoint(config.APIAddr), option.WithEndpoint(config.APIAddr),
option.WithTokenSource(ts),
option.WithScopes(scope), option.WithScopes(scope),
} }
opts = append(opts, options...) opts = append(opts, options...)
@ -166,7 +180,9 @@ func start(cfg *Config, options ...option.ClientOption) error {
return err return err
} }
a, ctx := initializeResources(ctx, conn, d) l := initializeProfileLabels()
a, ctx := initializeResources(ctx, conn, d, l)
go pollProfilerService(ctx, a) go pollProfilerService(ctx, a)
return nil return nil
} }
@ -180,9 +196,9 @@ func debugLog(format string, e ...interface{}) {
// agent polls Cloud Profiler server for instructions on behalf of // agent polls Cloud Profiler server for instructions on behalf of
// a task, and collects and uploads profiles as requested. // a task, and collects and uploads profiles as requested.
type agent struct { type agent struct {
client *client client *client
deployment *pb.Deployment deployment *pb.Deployment
creationErrorCount int64 profileLabels map[string]string
} }
// abortedBackoffDuration retrieves the retry duration from gRPC trailing // abortedBackoffDuration retrieves the retry duration from gRPC trailing
@ -292,6 +308,7 @@ func (a *agent) profileAndUpload(ctx context.Context, p *pb.Profile) {
} }
p.ProfileBytes = prof.Bytes() p.ProfileBytes = prof.Bytes()
p.Labels = a.profileLabels
req := pb.UpdateProfileRequest{Profile: p} req := pb.UpdateProfileRequest{Profile: p}
// Upload profile, discard profile in case of error. // Upload profile, discard profile in case of error.
@ -308,22 +325,6 @@ type client struct {
// Metadata for google API to be sent with each request. // Metadata for google API to be sent with each request.
xGoogHeader []string xGoogHeader []string
// Metadata for Cloud Profiler API to be sent with each request.
profilerHeader []string
}
// setProfilerHeader sets the unique key string for a deployment target in
// the `x-profiler-deployment-key-bin` header passed on each request.
// Intended for use by Cloud Profiler agents.
func (c *client) setProfilerHeader(d *pb.Deployment) {
labels := make([]string, 0, len(d.Labels))
for k, v := range d.Labels {
labels = append(labels, fmt.Sprintf("%s|%s", k, v))
}
sort.Strings(labels)
key := d.ProjectId + "##" + d.Target + "##" + strings.Join(labels, "#")
c.profilerHeader = []string{key}
} }
// setXGoogHeader sets the name and version of the application in // setXGoogHeader sets the name and version of the application in
@ -339,7 +340,6 @@ func (c *client) insertMetadata(ctx context.Context) context.Context {
md, _ := grpcmd.FromOutgoingContext(ctx) md, _ := grpcmd.FromOutgoingContext(ctx)
md = md.Copy() md = md.Copy()
md[xGoogAPIMetadata] = c.xGoogHeader md[xGoogAPIMetadata] = c.xGoogHeader
md[deploymentKeyMetadata] = c.profilerHeader
return grpcmd.NewOutgoingContext(ctx, md) return grpcmd.NewOutgoingContext(ctx, md)
} }
@ -354,14 +354,6 @@ func initializeDeployment() (*pb.Deployment, error) {
} }
} }
instance := config.InstanceName
if instance == "" {
instance, err = getInstanceName()
if err != nil {
return nil, err
}
}
zone := config.ZoneName zone := config.ZoneName
if zone == "" { if zone == "" {
zone, err = getZone() zone, err = getZone()
@ -370,39 +362,62 @@ func initializeDeployment() (*pb.Deployment, error) {
} }
} }
labels := map[string]string{
zoneNameLabel: zone,
}
if config.ServiceVersion != "" {
labels[versionLabel] = config.ServiceVersion
}
return &pb.Deployment{ return &pb.Deployment{
ProjectId: projectID, ProjectId: projectID,
Target: config.Target, Target: config.Target,
Labels: map[string]string{ Labels: labels,
instanceLabel: instance,
zoneNameLabel: zone,
},
}, nil }, nil
} }
func initializeResources(ctx context.Context, conn *grpc.ClientConn, d *pb.Deployment) (*agent, context.Context) { func initializeProfileLabels() map[string]string {
instance := config.InstanceName
if instance == "" {
var err error
if instance, err = getInstanceName(); err != nil {
instance = "unknown"
debugLog("failed to get instance name: %v", err)
}
}
return map[string]string{instanceLabel: instance}
}
func initializeResources(ctx context.Context, conn *grpc.ClientConn, d *pb.Deployment, l map[string]string) (*agent, context.Context) {
c := &client{ c := &client{
client: pb.NewProfilerServiceClient(conn), client: pb.NewProfilerServiceClient(conn),
} }
c.setXGoogHeader() c.setXGoogHeader()
c.setProfilerHeader(d)
ctx = c.insertMetadata(ctx) ctx = c.insertMetadata(ctx)
return &agent{ return &agent{
client: c, client: c,
deployment: d, deployment: d,
profileLabels: l,
}, ctx }, ctx
} }
func initializeConfig(cfg *Config) { func initializeConfig(cfg Config) error {
*config = *cfg config = cfg
if config.Service != "" {
config.Target = config.Service
}
if config.Target == "" { if config.Target == "" {
config.Target = "unknown" return errors.New("service name must be specified in the configuration")
} }
if config.APIAddr == "" { if config.APIAddr == "" {
config.APIAddr = apiAddress config.APIAddr = apiAddress
} }
return nil
} }
// pollProfilerService starts an endless loop to poll Cloud Profiler // pollProfilerService starts an endless loop to poll Cloud Profiler


@ -19,13 +19,7 @@ import (
) )
 func ExampleStart() {
-	// The caller should provide the target string in the config so Cloud
-	// Profiler knows how to group the profile data. Otherwise the target
-	// string is set to "unknown".
-	//
-	// Optionally DebugLogging can be set in the config to enable detailed
-	// logging from profiler.
-	err := profiler.Start(&profiler.Config{Target: "my-target"})
+	err := profiler.Start(profiler.Config{Service: "my-service", ServiceVersion: "v1"})
 	if err != nil {
 		//TODO: Handle error.
 	}


@ -17,10 +17,12 @@ package profiler
import ( import (
"errors" "errors"
"io" "io"
"runtime/pprof"
"strings" "strings"
"testing" "testing"
"time" "time"
gcemd "cloud.google.com/go/compute/metadata"
"cloud.google.com/go/internal/testutil" "cloud.google.com/go/internal/testutil"
"cloud.google.com/go/profiler/mocks" "cloud.google.com/go/profiler/mocks"
"github.com/golang/mock/gomock" "github.com/golang/mock/gomock"
@ -36,30 +38,33 @@ import (
) )
const ( const (
testProjectID = "test-project-ID" testProjectID = "test-project-ID"
testInstanceName = "test-instance-name" testInstanceName = "test-instance-name"
testZoneName = "test-zone-name" testZoneName = "test-zone-name"
testTarget = "test-target" testTarget = "test-target"
testService = "test-service"
testServiceVersion = "test-service-version"
) )
func createTestDeployment() *pb.Deployment { func createTestDeployment() *pb.Deployment {
labels := make(map[string]string) labels := map[string]string{
labels[zoneNameLabel] = testZoneName zoneNameLabel: testZoneName,
labels[instanceLabel] = testInstanceName versionLabel: testServiceVersion,
}
return &pb.Deployment{ return &pb.Deployment{
ProjectId: testProjectID, ProjectId: testProjectID,
Target: testTarget, Target: testService,
Labels: labels, Labels: labels,
} }
} }
func createTestAgent(psc pb.ProfilerServiceClient) *agent { func createTestAgent(psc pb.ProfilerServiceClient) *agent {
c := &client{client: psc} c := &client{client: psc}
a := &agent{ return &agent{
client: c, client: c,
deployment: createTestDeployment(), deployment: createTestDeployment(),
profileLabels: map[string]string{instanceLabel: testInstanceName},
} }
return a
} }
func createTrailers(dur time.Duration) map[string]string { func createTrailers(dur time.Duration) map[string]string {
@ -93,6 +98,13 @@ func TestCreateProfile(t *testing.T) {
} }
func TestProfileAndUpload(t *testing.T) { func TestProfileAndUpload(t *testing.T) {
defer func() {
startCPUProfile = pprof.StartCPUProfile
stopCPUProfile = pprof.StopCPUProfile
writeHeapProfile = pprof.WriteHeapProfile
sleep = gax.Sleep
}()
ctx := context.Background() ctx := context.Background()
ctrl := gomock.NewController(t) ctrl := gomock.NewController(t)
defer ctrl.Finish() defer ctrl.Finish()
@ -188,12 +200,12 @@ func TestProfileAndUpload(t *testing.T) {
if tt.duration != nil { if tt.duration != nil {
p.Duration = ptypes.DurationProto(*tt.duration) p.Duration = ptypes.DurationProto(*tt.duration)
} }
if tt.wantBytes != nil { if tt.wantBytes != nil {
wantProfile := &pb.Profile{ wantProfile := &pb.Profile{
ProfileType: p.ProfileType, ProfileType: p.ProfileType,
Duration: p.Duration, Duration: p.Duration,
ProfileBytes: tt.wantBytes, ProfileBytes: tt.wantBytes,
Labels: a.profileLabels,
} }
wantRequest := pb.UpdateProfileRequest{ wantRequest := pb.UpdateProfileRequest{
Profile: wantProfile, Profile: wantProfile,
@ -296,9 +308,11 @@ func TestRetry(t *testing.T) {
func TestInitializeResources(t *testing.T) { func TestInitializeResources(t *testing.T) {
d := createTestDeployment() d := createTestDeployment()
l := map[string]string{instanceLabel: testInstanceName}
ctx := context.Background() ctx := context.Background()
a, ctx := initializeResources(ctx, nil, d) a, ctx := initializeResources(ctx, nil, d, l)
if xg := a.client.xGoogHeader; len(xg) == 0 { if xg := a.client.xGoogHeader; len(xg) == 0 {
t.Errorf("initializeResources() sets empty xGoogHeader") t.Errorf("initializeResources() sets empty xGoogHeader")
@ -317,44 +331,93 @@ func TestInitializeResources(t *testing.T) {
} }
} }
wantPH := "test-project-ID##test-target##instance|test-instance-name#zone|test-zone-name"
if ph := a.client.profilerHeader; len(ph) == 0 {
t.Errorf("initializeResources() sets empty profilerHeader")
} else if ph[0] != wantPH {
t.Errorf("initializeResources() sets wrong profilerHeader, got: %v, want: %v", ph[0], wantPH)
}
md, _ := grpcmd.FromOutgoingContext(ctx) md, _ := grpcmd.FromOutgoingContext(ctx)
if !testutil.Equal(md[xGoogAPIMetadata], a.client.xGoogHeader) { if !testutil.Equal(md[xGoogAPIMetadata], a.client.xGoogHeader) {
t.Errorf("md[%v] = %v, want equal xGoogHeader = %v", xGoogAPIMetadata, md[xGoogAPIMetadata], a.client.xGoogHeader) t.Errorf("md[%v] = %v, want equal xGoogHeader = %v", xGoogAPIMetadata, md[xGoogAPIMetadata], a.client.xGoogHeader)
} }
if !testutil.Equal(md[deploymentKeyMetadata], a.client.profilerHeader) {
t.Errorf("md[%v] = %v, want equal profilerHeader = %v", deploymentKeyMetadata, md[deploymentKeyMetadata], a.client.profilerHeader)
}
} }
func TestInitializeDeployment(t *testing.T) { func TestInitializeDeployment(t *testing.T) {
defer func() {
getProjectID = gcemd.ProjectID
getZone = gcemd.Zone
config = Config{}
}()
getProjectID = func() (string, error) { getProjectID = func() (string, error) {
return testProjectID, nil return testProjectID, nil
} }
getInstanceName = func() (string, error) {
return testInstanceName, nil
}
getZone = func() (string, error) { getZone = func() (string, error) {
return testZoneName, nil return testZoneName, nil
} }
config = &Config{Target: testTarget} cfg := Config{Service: testService, ServiceVersion: testServiceVersion}
initializeConfig(cfg)
d, err := initializeDeployment() d, err := initializeDeployment()
if err != nil { if err != nil {
t.Errorf("initializeDeployment() got error: %v, want no error", err) t.Errorf("initializeDeployment() got error: %v, want no error", err)
} }
want := createTestDeployment() if want := createTestDeployment(); !testutil.Equal(d, want) {
t.Errorf("initializeDeployment() got: %v, want %v", d, want)
if !testutil.Equal(d, want) { }
t.Errorf("initializeDeployment() got wrong deployment, got: %v, want %v", d, want) }
func TestInitializeConfig(t *testing.T) {
oldConfig := config
defer func() {
config = oldConfig
}()
for _, tt := range []struct {
config Config
wantTarget string
wantErrorString string
}{
{
Config{Service: testService},
testService,
"",
},
{
Config{Target: testTarget},
testTarget,
"",
},
{
Config{},
"",
"service name must be specified in the configuration",
},
} {
errorString := ""
if err := initializeConfig(tt.config); err != nil {
errorString = err.Error()
}
if errorString != tt.wantErrorString {
t.Errorf("initializeConfig(%v) got error: %v, want %v", tt.config, errorString, tt.wantErrorString)
}
if config.Target != tt.wantTarget {
t.Errorf("initializeConfig(%v) got target: %v, want %v", tt.config, config.Target, tt.wantTarget)
}
}
}
func TestInitializeProfileLabels(t *testing.T) {
defer func() {
getInstanceName = gcemd.InstanceName
}()
getInstanceName = func() (string, error) {
return testInstanceName, nil
}
l := initializeProfileLabels()
want := map[string]string{instanceLabel: testInstanceName}
if !testutil.Equal(l, want) {
t.Errorf("initializeProfileLabels() got: %v, want %v", l, want)
} }
} }


@ -1,159 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"sync"
"time"
"golang.org/x/net/context"
)
// ackBuffer stores the pending ack IDs and notifies the Dirty channel when it becomes non-empty.
type ackBuffer struct {
Dirty chan struct{}
// Close done when ackBuffer is no longer needed.
Done chan struct{}
mu sync.Mutex
pending []string
send bool
}
// Add adds ackID to the buffer.
func (buf *ackBuffer) Add(ackID string) {
buf.mu.Lock()
defer buf.mu.Unlock()
buf.pending = append(buf.pending, ackID)
// If we are transitioning into a non-empty notification state.
if buf.send && len(buf.pending) == 1 {
buf.notify()
}
}
// RemoveAll removes all ackIDs from the buffer and returns them.
func (buf *ackBuffer) RemoveAll() []string {
buf.mu.Lock()
defer buf.mu.Unlock()
ret := buf.pending
buf.pending = nil
return ret
}
// SendNotifications enables sending dirty notification on empty -> non-empty transitions.
// If the buffer is already non-empty, a notification will be sent immediately.
func (buf *ackBuffer) SendNotifications() {
buf.mu.Lock()
defer buf.mu.Unlock()
buf.send = true
// If we are transitioning into a non-empty notification state.
if len(buf.pending) > 0 {
buf.notify()
}
}
func (buf *ackBuffer) notify() {
go func() {
select {
case buf.Dirty <- struct{}{}:
case <-buf.Done:
}
}()
}
// acker acks messages in batches.
type acker struct {
s service
Ctx context.Context // The context to use when acknowledging messages.
Sub string // The full name of the subscription.
AckTick <-chan time.Time // AckTick supplies the frequency with which to make ack requests.
// Notify is called with an ack ID after the message with that ack ID
// has been processed. An ackID is considered to have been processed
// if at least one attempt has been made to acknowledge it.
Notify func(string)
ackBuffer
wg sync.WaitGroup
done chan struct{}
}
// Start intiates processing of ackIDs which are added via Add.
// Notify is called with each ackID once it has been processed.
func (a *acker) Start() {
a.done = make(chan struct{})
a.ackBuffer.Dirty = make(chan struct{})
a.ackBuffer.Done = a.done
a.wg.Add(1)
go func() {
defer a.wg.Done()
for {
select {
case <-a.ackBuffer.Dirty:
a.ack(a.ackBuffer.RemoveAll())
case <-a.AckTick:
a.ack(a.ackBuffer.RemoveAll())
case <-a.done:
return
}
}
}()
}
// Ack adds an ack id to be acked in the next batch.
func (a *acker) Ack(ackID string) {
a.ackBuffer.Add(ackID)
}
// FastMode switches acker into a mode which acks messages as they arrive, rather than waiting
// for a.AckTick.
func (a *acker) FastMode() {
a.ackBuffer.SendNotifications()
}
// Stop drops all pending messages, and releases resources before returning.
func (a *acker) Stop() {
close(a.done)
a.wg.Wait()
}
const maxAckAttempts = 2
// ack acknowledges the supplied ackIDs.
// After the acknowledgement request has completed (regardless of its success
// or failure), ids will be passed to a.Notify.
func (a *acker) ack(ids []string) {
head, tail := a.s.splitAckIDs(ids)
for len(head) > 0 {
for i := 0; i < maxAckAttempts; i++ {
if a.s.acknowledge(a.Ctx, a.Sub, head) == nil {
break
}
}
// NOTE: if retry gives up and returns an error, we simply drop
// those ack IDs. The messages will be redelivered and this is
// a documented behaviour of the API.
head, tail = a.s.splitAckIDs(tail)
}
for _, id := range ids {
a.Notify(id)
}
}


@ -1,262 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"reflect"
"sort"
"testing"
"time"
"golang.org/x/net/context"
)
func TestAcker(t *testing.T) {
tick := make(chan time.Time)
s := &testService{acknowledgeCalled: make(chan acknowledgeCall)}
processed := make(chan string, 10)
acker := &acker{
s: s,
Ctx: context.Background(),
Sub: "subname",
AckTick: tick,
Notify: func(ackID string) { processed <- ackID },
}
acker.Start()
checkAckProcessed := func(ackIDs []string) {
got := <-s.acknowledgeCalled
sort.Strings(got.ackIDs)
want := acknowledgeCall{
subName: "subname",
ackIDs: ackIDs,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want)
}
}
acker.Ack("a")
acker.Ack("b")
tick <- time.Time{}
checkAckProcessed([]string{"a", "b"})
acker.Ack("c")
tick <- time.Time{}
checkAckProcessed([]string{"c"})
acker.Stop()
// all IDS should have been sent to processed.
close(processed)
processedIDs := []string{}
for id := range processed {
processedIDs = append(processedIDs, id)
}
sort.Strings(processedIDs)
want := []string{"a", "b", "c"}
if !reflect.DeepEqual(processedIDs, want) {
t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want)
}
}
func TestAckerFastMode(t *testing.T) {
tick := make(chan time.Time)
s := &testService{acknowledgeCalled: make(chan acknowledgeCall)}
processed := make(chan string, 10)
acker := &acker{
s: s,
Ctx: context.Background(),
Sub: "subname",
AckTick: tick,
Notify: func(ackID string) { processed <- ackID },
}
acker.Start()
checkAckProcessed := func(ackIDs []string) {
got := <-s.acknowledgeCalled
sort.Strings(got.ackIDs)
want := acknowledgeCall{
subName: "subname",
ackIDs: ackIDs,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want)
}
}
// No ticks are sent; fast mode doesn't need them.
acker.Ack("a")
acker.Ack("b")
acker.FastMode()
checkAckProcessed([]string{"a", "b"})
acker.Ack("c")
checkAckProcessed([]string{"c"})
acker.Stop()
// all IDS should have been sent to processed.
close(processed)
processedIDs := []string{}
for id := range processed {
processedIDs = append(processedIDs, id)
}
sort.Strings(processedIDs)
want := []string{"a", "b", "c"}
if !reflect.DeepEqual(processedIDs, want) {
t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want)
}
}
// TestAckerStop checks that Stop returns immediately.
func TestAckerStop(t *testing.T) {
tick := make(chan time.Time)
s := &testService{acknowledgeCalled: make(chan acknowledgeCall, 10)}
processed := make(chan string)
acker := &acker{
s: s,
Ctx: context.Background(),
Sub: "subname",
AckTick: tick,
Notify: func(ackID string) { processed <- ackID },
}
acker.Start()
stopped := make(chan struct{})
acker.Ack("a")
go func() {
acker.Stop()
stopped <- struct{}{}
}()
// Stopped should have been written to by the time this sleep completes.
time.Sleep(time.Millisecond)
// Receiving from processed should cause Stop to subsequently return,
// so it should never be possible to read from stopped before
// processed.
select {
case <-stopped:
case <-processed:
t.Errorf("acker.Stop processed an ack id before returning")
case <-time.After(time.Millisecond):
t.Errorf("acker.Stop never returned")
}
}
type ackCallResult struct {
ackIDs []string
err error
}
type ackService struct {
service
calls []ackCallResult
t *testing.T // used for error logging.
}
func (as *ackService) acknowledge(ctx context.Context, subName string, ackIDs []string) error {
if len(as.calls) == 0 {
as.t.Fatalf("unexpected call to acknowledge: ackIDs: %v", ackIDs)
}
call := as.calls[0]
as.calls = as.calls[1:]
if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) {
as.t.Errorf("unexpected arguments to acknowledge: got: %v ; want: %v", got, want)
}
return call.err
}
// Test implementation returns the first 2 elements as head, and the rest as tail.
func (as *ackService) splitAckIDs(ids []string) ([]string, []string) {
if len(ids) < 2 {
return ids, nil
}
return ids[:2], ids[2:]
}
func TestAckerSplitsBatches(t *testing.T) {
type testCase struct {
calls []ackCallResult
}
for _, tc := range []testCase{
{
calls: []ackCallResult{
{
ackIDs: []string{"a", "b"},
},
{
ackIDs: []string{"c", "d"},
},
{
ackIDs: []string{"e", "f"},
},
},
},
{
calls: []ackCallResult{
{
ackIDs: []string{"a", "b"},
err: errors.New("bang"),
},
// On error we retry once.
{
ackIDs: []string{"a", "b"},
err: errors.New("bang"),
},
// We give up after failing twice, so we move on to the next set, "c" and "d".
{
ackIDs: []string{"c", "d"},
err: errors.New("bang"),
},
// Again, we retry once.
{
ackIDs: []string{"c", "d"},
},
{
ackIDs: []string{"e", "f"},
},
},
},
} {
s := &ackService{
t: t,
calls: tc.calls,
}
acker := &acker{
s: s,
Ctx: context.Background(),
Sub: "subname",
Notify: func(string) {},
}
acker.ack([]string{"a", "b", "c", "d", "e", "f"})
if len(s.calls) != 0 {
t.Errorf("expected ack calls did not occur: %v", s.calls)
}
}
}
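For orientation, here is a minimal standalone sketch of the batching pattern that TestAckerSplitsBatches above exercises: pending ack IDs are split into a head batch and a tail, each batch is sent once, a failed batch is retried once, and the loop then moves on. The names splitBatch, ackAll, and send are hypothetical and not part of the vendored package; the split-into-pairs rule comes from the test's splitAckIDs helper.
package main
import (
	"errors"
	"fmt"
)
// splitBatch mirrors the test service's splitAckIDs: the first two IDs form
// the head batch and the remainder is returned as the tail.
func splitBatch(ids []string) (head, tail []string) {
	if len(ids) < 2 {
		return ids, nil
	}
	return ids[:2], ids[2:]
}
// ackAll sends ack IDs batch by batch, retrying a failed batch once before
// moving on to the next one, which is the behavior the test above expects.
func ackAll(ids []string, send func([]string) error) {
	for len(ids) > 0 {
		head, tail := splitBatch(ids)
		if err := send(head); err != nil {
			_ = send(head) // one retry; a second failure is dropped
		}
		ids = tail
	}
}
func main() {
	failFirst := true
	send := func(batch []string) error {
		fmt.Println("acknowledge:", batch)
		if failFirst {
			failFirst = false
			return errors.New("bang")
		}
		return nil
	}
	ackAll([]string{"a", "b", "c", "d", "e", "f"}, send)
}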

View file

@ -35,8 +35,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md) return metadata.NewOutgoingContext(ctx, md)
} }
// DefaultAuthScopes reports the authentication scopes required // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
// by this package.
func DefaultAuthScopes() []string { func DefaultAuthScopes() []string {
return []string{ return []string{
"https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform",

View file

@ -75,6 +75,18 @@ func (s *mockPublisherServer) CreateTopic(ctx context.Context, req *pubsubpb.Top
return s.resps[0].(*pubsubpb.Topic), nil return s.resps[0].(*pubsubpb.Topic), nil
} }
func (s *mockPublisherServer) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest) (*pubsubpb.Topic, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*pubsubpb.Topic), nil
}
func (s *mockPublisherServer) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { func (s *mockPublisherServer) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) {
md, _ := metadata.FromIncomingContext(ctx) md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
@ -358,6 +370,18 @@ func (s *mockSubscriberServer) CreateSnapshot(ctx context.Context, req *pubsubpb
return s.resps[0].(*pubsubpb.Snapshot), nil return s.resps[0].(*pubsubpb.Snapshot), nil
} }
func (s *mockSubscriberServer) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest) (*pubsubpb.Snapshot, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*pubsubpb.Snapshot), nil
}
func (s *mockSubscriberServer) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest) (*emptypb.Empty, error) { func (s *mockSubscriberServer) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest) (*emptypb.Empty, error) {
md, _ := metadata.FromIncomingContext(ctx) md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
@ -474,6 +498,69 @@ func TestPublisherCreateTopicError(t *testing.T) {
} }
_ = resp _ = resp
} }
func TestPublisherUpdateTopic(t *testing.T) {
var name string = "name3373707"
var expectedResponse = &pubsubpb.Topic{
Name: name,
}
mockPublisher.err = nil
mockPublisher.reqs = nil
mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse)
var topic *pubsubpb.Topic = &pubsubpb.Topic{}
var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{}
var request = &pubsubpb.UpdateTopicRequest{
Topic: topic,
UpdateMask: updateMask,
}
c, err := NewPublisherClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.UpdateTopic(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q", got, want)
}
}
func TestPublisherUpdateTopicError(t *testing.T) {
errCode := codes.PermissionDenied
mockPublisher.err = gstatus.Error(errCode, "test error")
var topic *pubsubpb.Topic = &pubsubpb.Topic{}
var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{}
var request = &pubsubpb.UpdateTopicRequest{
Topic: topic,
UpdateMask: updateMask,
}
c, err := NewPublisherClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.UpdateTopic(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestPublisherPublish(t *testing.T) { func TestPublisherPublish(t *testing.T) {
var messageIdsElement string = "messageIdsElement-744837059" var messageIdsElement string = "messageIdsElement-744837059"
var messageIds = []string{messageIdsElement} var messageIds = []string{messageIdsElement}
@ -1581,6 +1668,71 @@ func TestSubscriberCreateSnapshotError(t *testing.T) {
} }
_ = resp _ = resp
} }
func TestSubscriberUpdateSnapshot(t *testing.T) {
var name string = "name3373707"
var topic string = "topic110546223"
var expectedResponse = &pubsubpb.Snapshot{
Name: name,
Topic: topic,
}
mockSubscriber.err = nil
mockSubscriber.reqs = nil
mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse)
var snapshot *pubsubpb.Snapshot = &pubsubpb.Snapshot{}
var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{}
var request = &pubsubpb.UpdateSnapshotRequest{
Snapshot: snapshot,
UpdateMask: updateMask,
}
c, err := NewSubscriberClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.UpdateSnapshot(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q", got, want)
}
}
func TestSubscriberUpdateSnapshotError(t *testing.T) {
errCode := codes.PermissionDenied
mockSubscriber.err = gstatus.Error(errCode, "test error")
var snapshot *pubsubpb.Snapshot = &pubsubpb.Snapshot{}
var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{}
var request = &pubsubpb.UpdateSnapshotRequest{
Snapshot: snapshot,
UpdateMask: updateMask,
}
c, err := NewSubscriberClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.UpdateSnapshot(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSubscriberDeleteSnapshot(t *testing.T) { func TestSubscriberDeleteSnapshot(t *testing.T) {
var expectedResponse *emptypb.Empty = &emptypb.Empty{} var expectedResponse *emptypb.Empty = &emptypb.Empty{}

View file

@ -32,14 +32,10 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
publisherProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
publisherTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}")
)
// PublisherCallOptions contains the retry settings for each method of PublisherClient. // PublisherCallOptions contains the retry settings for each method of PublisherClient.
type PublisherCallOptions struct { type PublisherCallOptions struct {
CreateTopic []gax.CallOption CreateTopic []gax.CallOption
UpdateTopic []gax.CallOption
Publish []gax.CallOption Publish []gax.CallOption
GetTopic []gax.CallOption GetTopic []gax.CallOption
ListTopics []gax.CallOption ListTopics []gax.CallOption
@ -88,6 +84,7 @@ func defaultPublisherCallOptions() *PublisherCallOptions {
} }
return &PublisherCallOptions{ return &PublisherCallOptions{
CreateTopic: retry[[2]string{"default", "idempotent"}], CreateTopic: retry[[2]string{"default", "idempotent"}],
UpdateTopic: retry[[2]string{"default", "idempotent"}],
Publish: retry[[2]string{"messaging", "one_plus_delivery"}], Publish: retry[[2]string{"messaging", "one_plus_delivery"}],
GetTopic: retry[[2]string{"default", "idempotent"}], GetTopic: retry[[2]string{"default", "idempotent"}],
ListTopics: retry[[2]string{"default", "idempotent"}], ListTopics: retry[[2]string{"default", "idempotent"}],
@ -152,25 +149,20 @@ func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) {
// PublisherProjectPath returns the path for the project resource. // PublisherProjectPath returns the path for the project resource.
func PublisherProjectPath(project string) string { func PublisherProjectPath(project string) string {
path, err := publisherProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// PublisherTopicPath returns the path for the topic resource. // PublisherTopicPath returns the path for the topic resource.
func PublisherTopicPath(project, topic string) string { func PublisherTopicPath(project, topic string) string {
path, err := publisherTopicPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"topic": topic, project +
}) "/topics/" +
if err != nil { topic +
panic(err) ""
}
return path
} }
func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
@ -197,9 +189,30 @@ func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic,
return resp, nil return resp, nil
} }
// Publish adds one or more messages to the topic. Returns `NOT_FOUND` if the topic // UpdateTopic updates an existing topic. Note that certain properties of a topic are not
// modifiable. Options settings follow the style guide:
// NOTE: The style guide requires body: "topic" instead of body: "*".
// Keeping the latter for internal consistency in V1, however it should be
// corrected in V2. See
// https://cloud.google.com/apis/design/standard_methods#update for details.
func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...)
var resp *pubsubpb.Topic
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic
// does not exist. The message payload must not be empty; it must contain // does not exist. The message payload must not be empty; it must contain
// either a non-empty data field, or at least one attribute. // either a non-empty data field, or at least one attribute.
func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...) opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...)
@ -301,11 +314,11 @@ func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsu
return it return it
} }
// DeleteTopic deletes the topic with the given name. Returns `NOT_FOUND` if the topic // DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic
// does not exist. After a topic is deleted, a new topic may be created with // does not exist. After a topic is deleted, a new topic may be created with
// the same name; this is an entirely new topic with none of the old // the same name; this is an entirely new topic with none of the old
// configuration or subscriptions. Existing subscriptions to this topic are // configuration or subscriptions. Existing subscriptions to this topic are
// not deleted, but their `topic` field is set to `_deleted-topic_`. // not deleted, but their topic field is set to _deleted-topic_.
func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...) opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...)

View file

@ -85,6 +85,24 @@ func ExamplePublisherClient_CreateTopic() {
_ = resp _ = resp
} }
func ExamplePublisherClient_UpdateTopic() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.UpdateTopicRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateTopic(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExamplePublisherClient_Publish() { func ExamplePublisherClient_Publish() {
ctx := context.Background() ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx) c, err := pubsub.NewPublisherClient(ctx)

View file

@ -32,13 +32,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
subscriberProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
subscriberSnapshotPathTemplate = gax.MustCompilePathTemplate("projects/{project}/snapshots/{snapshot}")
subscriberSubscriptionPathTemplate = gax.MustCompilePathTemplate("projects/{project}/subscriptions/{subscription}")
subscriberTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}")
)
// SubscriberCallOptions contains the retry settings for each method of SubscriberClient. // SubscriberCallOptions contains the retry settings for each method of SubscriberClient.
type SubscriberCallOptions struct { type SubscriberCallOptions struct {
CreateSubscription []gax.CallOption CreateSubscription []gax.CallOption
@ -53,6 +46,7 @@ type SubscriberCallOptions struct {
ModifyPushConfig []gax.CallOption ModifyPushConfig []gax.CallOption
ListSnapshots []gax.CallOption ListSnapshots []gax.CallOption
CreateSnapshot []gax.CallOption CreateSnapshot []gax.CallOption
UpdateSnapshot []gax.CallOption
DeleteSnapshot []gax.CallOption DeleteSnapshot []gax.CallOption
Seek []gax.CallOption Seek []gax.CallOption
} }
@ -93,6 +87,21 @@ func defaultSubscriberCallOptions() *SubscriberCallOptions {
}) })
}), }),
}, },
{"streaming_messaging", "pull"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Canceled,
codes.DeadlineExceeded,
codes.ResourceExhausted,
codes.Internal,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
} }
return &SubscriberCallOptions{ return &SubscriberCallOptions{
CreateSubscription: retry[[2]string{"default", "idempotent"}], CreateSubscription: retry[[2]string{"default", "idempotent"}],
@ -103,10 +112,11 @@ func defaultSubscriberCallOptions() *SubscriberCallOptions {
ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}], ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}],
Acknowledge: retry[[2]string{"messaging", "non_idempotent"}], Acknowledge: retry[[2]string{"messaging", "non_idempotent"}],
Pull: retry[[2]string{"messaging", "pull"}], Pull: retry[[2]string{"messaging", "pull"}],
StreamingPull: retry[[2]string{"messaging", "pull"}], StreamingPull: retry[[2]string{"streaming_messaging", "pull"}],
ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}], ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}],
ListSnapshots: retry[[2]string{"default", "idempotent"}], ListSnapshots: retry[[2]string{"default", "idempotent"}],
CreateSnapshot: retry[[2]string{"default", "idempotent"}], CreateSnapshot: retry[[2]string{"default", "idempotent"}],
UpdateSnapshot: retry[[2]string{"default", "idempotent"}],
DeleteSnapshot: retry[[2]string{"default", "idempotent"}], DeleteSnapshot: retry[[2]string{"default", "idempotent"}],
Seek: retry[[2]string{"default", "non_idempotent"}], Seek: retry[[2]string{"default", "non_idempotent"}],
} }
@ -130,7 +140,7 @@ type SubscriberClient struct {
// NewSubscriberClient creates a new subscriber client. // NewSubscriberClient creates a new subscriber client.
// //
// The service that an application uses to manipulate subscriptions and to // The service that an application uses to manipulate subscriptions and to
// consume messages from a subscription via the `Pull` method. // consume messages from a subscription via the Pull method.
func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...) conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...)
if err != nil { if err != nil {
@ -168,49 +178,40 @@ func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) {
// SubscriberProjectPath returns the path for the project resource. // SubscriberProjectPath returns the path for the project resource.
func SubscriberProjectPath(project string) string { func SubscriberProjectPath(project string) string {
path, err := subscriberProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// SubscriberSnapshotPath returns the path for the snapshot resource. // SubscriberSnapshotPath returns the path for the snapshot resource.
func SubscriberSnapshotPath(project, snapshot string) string { func SubscriberSnapshotPath(project, snapshot string) string {
path, err := subscriberSnapshotPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"snapshot": snapshot, project +
}) "/snapshots/" +
if err != nil { snapshot +
panic(err) ""
}
return path
} }
// SubscriberSubscriptionPath returns the path for the subscription resource. // SubscriberSubscriptionPath returns the path for the subscription resource.
func SubscriberSubscriptionPath(project, subscription string) string { func SubscriberSubscriptionPath(project, subscription string) string {
path, err := subscriberSubscriptionPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"subscription": subscription, project +
}) "/subscriptions/" +
if err != nil { subscription +
panic(err) ""
}
return path
} }
// SubscriberTopicPath returns the path for the topic resource. // SubscriberTopicPath returns the path for the topic resource.
func SubscriberTopicPath(project, topic string) string { func SubscriberTopicPath(project, topic string) string {
path, err := subscriberTopicPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"topic": topic, project +
}) "/topics/" +
if err != nil { topic +
panic(err) ""
}
return path
} }
func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
@ -222,13 +223,13 @@ func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
} }
// CreateSubscription creates a subscription to a given topic. // CreateSubscription creates a subscription to a given topic.
// If the subscription already exists, returns `ALREADY_EXISTS`. // If the subscription already exists, returns ALREADY_EXISTS.
// If the corresponding topic doesn't exist, returns `NOT_FOUND`. // If the corresponding topic doesn't exist, returns NOT_FOUND.
// //
// If the name is not provided in the request, the server will assign a random // If the name is not provided in the request, the server will assign a random
// name for this subscription on the same project as the topic, conforming // name for this subscription on the same project as the topic, conforming
// to the // to the
// [resource name format](https://cloud.google.com/pubsub/docs/overview#names). // resource name format (at https://cloud.google.com/pubsub/docs/overview#names).
// The generated name is populated in the returned Subscription object. // The generated name is populated in the returned Subscription object.
// Note that for REST API requests, you must specify a name in the request. // Note that for REST API requests, you must specify a name in the request.
func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
@ -264,6 +265,10 @@ func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.Ge
// UpdateSubscription updates an existing subscription. Note that certain properties of a // UpdateSubscription updates an existing subscription. Note that certain properties of a
// subscription, such as its topic, are not modifiable. // subscription, such as its topic, are not modifiable.
// NOTE: The style guide requires body: "subscription" instead of body: "*".
// Keeping the latter for internal consistency in V1, however it should be
// corrected in V2. See
// https://cloud.google.com/apis/design/standard_methods#update for details.
func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...) opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...)
@ -315,8 +320,8 @@ func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.
} }
// DeleteSubscription deletes an existing subscription. All messages retained in the subscription // DeleteSubscription deletes an existing subscription. All messages retained in the subscription
// are immediately dropped. Calls to `Pull` after deletion will return // are immediately dropped. Calls to Pull after deletion will return
// `NOT_FOUND`. After a subscription is deleted, a new one may be created with // NOT_FOUND. After a subscription is deleted, a new one may be created with
// the same name, but the new one has no association with the old // the same name, but the new one has no association with the old
// subscription or its topic unless the same topic is specified. // subscription or its topic unless the same topic is specified.
func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error {
@ -334,7 +339,7 @@ func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb
// to indicate that more time is needed to process a message by the // to indicate that more time is needed to process a message by the
// subscriber, or to make the message available for redelivery if the // subscriber, or to make the message available for redelivery if the
// processing was interrupted. Note that this does not modify the // processing was interrupted. Note that this does not modify the
// subscription-level `ackDeadlineSeconds` used for subsequent messages. // subscription-level ackDeadlineSeconds used for subsequent messages.
func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...) opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...)
@ -346,8 +351,8 @@ func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.
return err return err
} }
// Acknowledge acknowledges the messages associated with the `ack_ids` in the // Acknowledge acknowledges the messages associated with the ack_ids in the
// `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages // AcknowledgeRequest. The Pub/Sub system can remove the relevant messages
// from the subscription. // from the subscription.
// //
// Acknowledging a message whose ack deadline has expired may succeed, // Acknowledging a message whose ack deadline has expired may succeed,
@ -365,7 +370,7 @@ func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.Acknow
} }
// Pull pulls messages from the server. Returns an empty list if there are no // Pull pulls messages from the server. Returns an empty list if there are no
// messages available in the backlog. The server may return `UNAVAILABLE` if // messages available in the backlog. The server may return UNAVAILABLE if
// there are too many concurrent pull requests pending for the given // there are too many concurrent pull requests pending for the given
// subscription. // subscription.
func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) {
@ -390,9 +395,9 @@ func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest,
// Establishes a stream with the server, which sends messages down to the // Establishes a stream with the server, which sends messages down to the
// client. The client streams acknowledgements and ack deadline modifications // client. The client streams acknowledgements and ack deadline modifications
// back to the server. The server will close the stream and return the status // back to the server. The server will close the stream and return the status
// on any error. The server may close the stream with status `OK` to reassign // on any error. The server may close the stream with status OK to reassign
// server-side resources, in which case, the client should re-establish the // server-side resources, in which case, the client should re-establish the
// stream. `UNAVAILABLE` may also be returned in the case of a transient error // stream. UNAVAILABLE may also be returned in the case of a transient error
// (e.g., a server restart). These should also be retried by the client. Flow // (e.g., a server restart). These should also be retried by the client. Flow
// control can be achieved by configuring the underlying RPC channel. // control can be achieved by configuring the underlying RPC channel.
func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) {
@ -410,12 +415,12 @@ func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOp
return resp, nil return resp, nil
} }
// ModifyPushConfig modifies the `PushConfig` for a specified subscription. // ModifyPushConfig modifies the PushConfig for a specified subscription.
// //
// This may be used to change a push subscription to a pull one (signified by // This may be used to change a push subscription to a pull one (signified by
// an empty `PushConfig`) or vice versa, or change the endpoint URL and other // an empty PushConfig) or vice versa, or change the endpoint URL and other
// attributes of a push subscription. Messages will accumulate for delivery // attributes of a push subscription. Messages will accumulate for delivery
// continuously through the call regardless of changes to the `PushConfig`. // continuously through the call regardless of changes to the PushConfig.
func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...) opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...)
@ -463,13 +468,13 @@ func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.List
} }
// CreateSnapshot creates a snapshot from the requested subscription. // CreateSnapshot creates a snapshot from the requested subscription.
// If the snapshot already exists, returns `ALREADY_EXISTS`. // If the snapshot already exists, returns ALREADY_EXISTS.
// If the requested subscription doesn't exist, returns `NOT_FOUND`. // If the requested subscription doesn't exist, returns NOT_FOUND.
// //
// If the name is not provided in the request, the server will assign a random // If the name is not provided in the request, the server will assign a random
// name for this snapshot on the same project as the subscription, conforming // name for this snapshot on the same project as the subscription, conforming
// to the // to the
// [resource name format](https://cloud.google.com/pubsub/docs/overview#names). // resource name format (at https://cloud.google.com/pubsub/docs/overview#names).
// The generated name is populated in the returned Snapshot object. // The generated name is populated in the returned Snapshot object.
// Note that for REST API requests, you must specify a name in the request. // Note that for REST API requests, you must specify a name in the request.
func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
@ -487,6 +492,27 @@ func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.Cre
return resp, nil return resp, nil
} }
// UpdateSnapshot updates an existing snapshot. Note that certain properties of a snapshot
// are not modifiable.
// NOTE: The style guide requires body: "snapshot" instead of body: "*".
// Keeping the latter for internal consistency in V1, however it should be
// corrected in V2. See
// https://cloud.google.com/apis/design/standard_methods#update for details.
func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...)
var resp *pubsubpb.Snapshot
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteSnapshot removes an existing snapshot. All messages retained in the snapshot // DeleteSnapshot removes an existing snapshot. All messages retained in the snapshot
// are immediately dropped. After a snapshot is deleted, a new one may be // are immediately dropped. After a snapshot is deleted, a new one may be
// created with the same name, but the new one has no association with the old // created with the same name, but the new one has no association with the old

View file

@ -305,6 +305,24 @@ func ExampleSubscriberClient_CreateSnapshot() {
_ = resp _ = resp
} }
func ExampleSubscriberClient_UpdateSnapshot() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.UpdateSnapshotRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateSnapshot(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleSubscriberClient_DeleteSnapshot() { func ExampleSubscriberClient_DeleteSnapshot() {
ctx := context.Background() ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx) c, err := pubsub.NewSubscriberClient(ctx)

View file

@ -49,6 +49,18 @@ func ExampleClient_CreateTopic() {
_ = topic // TODO: use the topic. _ = topic // TODO: use the topic.
} }
// Use TopicInProject to refer to a topic that is not in the client's project, such
// as a public topic.
func ExampleClient_TopicInProject() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
topic := client.TopicInProject("topicName", "another-project-id")
_ = topic // TODO: use the topic.
}
func ExampleClient_CreateSubscription() { func ExampleClient_CreateSubscription() {
ctx := context.Background() ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id") client, err := pubsub.NewClient(ctx, "project-id")

View file

@ -117,7 +117,7 @@ func (s *fakeServer) StreamingPull(stream pb.Subscriber_StreamingPullServer) err
// Add a slight delay to ensure the server receives any // Add a slight delay to ensure the server receives any
// messages en route from the client before shutting down the stream. // messages en route from the client before shutting down the stream.
// This reduces flakiness of tests involving retry. // This reduces flakiness of tests involving retry.
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
} }
if pr.err == io.EOF { if pr.err == io.EOF {
return nil return nil

View file

@ -31,6 +31,11 @@ import (
"google.golang.org/api/option" "google.golang.org/api/option"
) )
var (
topicIDs = testutil.NewUIDSpace("topic")
subIDs = testutil.NewUIDSpace("sub")
)
// messageData is used to hold the contents of a message so that it can be compared against the contents // messageData is used to hold the contents of a message so that it can be compared against the contents
// of another message without regard to irrelevant fields. // of another message without regard to irrelevant fields.
type messageData struct { type messageData struct {
@ -47,8 +52,7 @@ func extractMessageData(m *Message) *messageData {
} }
} }
func TestAll(t *testing.T) { func integrationTestClient(t *testing.T, ctx context.Context) *Client {
t.Parallel()
if testing.Short() { if testing.Short() {
t.Skip("Integration tests skipped in short mode") t.Skip("Integration tests skipped in short mode")
} }
@ -56,30 +60,31 @@ func TestAll(t *testing.T) {
if projID == "" { if projID == "" {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
} }
ctx := context.Background()
ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
if ts == nil { if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
} }
now := time.Now()
topicName := fmt.Sprintf("topic-%d", now.Unix())
subName := fmt.Sprintf("subscription-%d", now.Unix())
client, err := NewClient(ctx, projID, option.WithTokenSource(ts)) client, err := NewClient(ctx, projID, option.WithTokenSource(ts))
if err != nil { if err != nil {
t.Fatalf("Creating client error: %v", err) t.Fatalf("Creating client error: %v", err)
} }
return client
}
func TestAll(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := integrationTestClient(t, ctx)
defer client.Close() defer client.Close()
var topic *Topic topic, err := client.CreateTopic(ctx, topicIDs.New())
if topic, err = client.CreateTopic(ctx, topicName); err != nil { if err != nil {
t.Errorf("CreateTopic error: %v", err) t.Errorf("CreateTopic error: %v", err)
} }
defer topic.Stop() defer topic.Stop()
var sub *Subscription var sub *Subscription
if sub, err = client.CreateSubscription(ctx, subName, SubscriptionConfig{Topic: topic}); err != nil { if sub, err = client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{Topic: topic}); err != nil {
t.Errorf("CreateSub error: %v", err) t.Errorf("CreateSub error: %v", err)
} }
@ -88,7 +93,7 @@ func TestAll(t *testing.T) {
t.Fatalf("TopicExists error: %v", err) t.Fatalf("TopicExists error: %v", err)
} }
if !exists { if !exists {
t.Errorf("topic %s should exist, but it doesn't", topic) t.Errorf("topic %v should exist, but it doesn't", topic)
} }
exists, err = sub.Exists(ctx) exists, err = sub.Exists(ctx)
@ -96,10 +101,10 @@ func TestAll(t *testing.T) {
t.Fatalf("SubExists error: %v", err) t.Fatalf("SubExists error: %v", err)
} }
if !exists { if !exists {
t.Errorf("subscription %s should exist, but it doesn't", subName) t.Errorf("subscription %s should exist, but it doesn't", sub.ID())
} }
msgs := []*Message{} var msgs []*Message
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
text := fmt.Sprintf("a message with an index %d", i) text := fmt.Sprintf("a message with an index %d", i)
attrs := make(map[string]string) attrs := make(map[string]string)
@ -275,37 +280,18 @@ func testIAM(ctx context.Context, h *iam.Handle, permission string) (msg string,
func TestSubscriptionUpdate(t *testing.T) { func TestSubscriptionUpdate(t *testing.T) {
t.Parallel() t.Parallel()
ctx := context.Background() ctx := context.Background()
if testing.Short() { client := integrationTestClient(t, ctx)
t.Skip("Integration tests skipped in short mode")
}
projID := testutil.ProjID()
if projID == "" {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details.")
}
ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
now := time.Now()
topicName := fmt.Sprintf("topic-modify-%d", now.Unix())
subName := fmt.Sprintf("subscription-modify-%d", now.Unix())
client, err := NewClient(ctx, projID, option.WithTokenSource(ts))
if err != nil {
t.Fatalf("Creating client error: %v", err)
}
defer client.Close() defer client.Close()
var topic *Topic topic, err := client.CreateTopic(ctx, topicIDs.New())
if topic, err = client.CreateTopic(ctx, topicName); err != nil { if err != nil {
t.Fatalf("CreateTopic error: %v", err) t.Fatalf("CreateTopic error: %v", err)
} }
defer topic.Stop() defer topic.Stop()
defer topic.Delete(ctx) defer topic.Delete(ctx)
var sub *Subscription var sub *Subscription
if sub, err = client.CreateSubscription(ctx, subName, SubscriptionConfig{Topic: topic}); err != nil { if sub, err = client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{Topic: topic}); err != nil {
t.Fatalf("CreateSub error: %v", err) t.Fatalf("CreateSub error: %v", err)
} }
defer sub.Delete(ctx) defer sub.Delete(ctx)
@ -318,6 +304,7 @@ func TestSubscriptionUpdate(t *testing.T) {
t.Fatalf("got %+v, want empty PushConfig") t.Fatalf("got %+v, want empty PushConfig")
} }
// Add a PushConfig. // Add a PushConfig.
projID := testutil.ProjID()
pc := PushConfig{ pc := PushConfig{
Endpoint: "https://" + projID + ".appspot.com/_ah/push-handlers/push", Endpoint: "https://" + projID + ".appspot.com/_ah/push-handlers/push",
Attributes: map[string]string{"x-goog-version": "v1"}, Attributes: map[string]string{"x-goog-version": "v1"},
@ -349,3 +336,28 @@ func TestSubscriptionUpdate(t *testing.T) {
t.Fatal("got nil, wanted error") t.Fatal("got nil, wanted error")
} }
} }
func TestPublicTopic(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := integrationTestClient(t, ctx)
defer client.Close()
sub, err := client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{
Topic: client.TopicInProject("taxirides-realtime", "pubsub-public-data"),
})
if err != nil {
t.Fatal(err)
}
defer sub.Delete(ctx)
// Confirm that Receive works. It doesn't matter if we actually get any
// messages.
ctxt, cancel := context.WithTimeout(ctx, 5*time.Second)
err = sub.Receive(ctxt, func(_ context.Context, msg *Message) {
msg.Ack()
cancel()
})
if err != nil {
t.Fatal(err)
}
}

View file

@ -15,201 +15,21 @@
package pubsub package pubsub
import ( import (
"log"
"sync" "sync"
"time" "time"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/support/bundler"
pb "google.golang.org/genproto/googleapis/pubsub/v1" pb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
) )
type messageIterator struct { // newMessageIterator starts a new streamingMessageIterator. Stop must be called on the messageIterator
impl interface {
next() (*Message, error)
stop()
}
}
type pollingMessageIterator struct {
// kaTicker controls how often we send an ack deadline extension request.
kaTicker *time.Ticker
// ackTicker controls how often we acknowledge a batch of messages.
ackTicker *time.Ticker
ka *keepAlive
acker *acker
nacker *bundler.Bundler
puller *puller
// mu ensures that cleanup only happens once, and concurrent Stop
// invocations block until cleanup completes.
mu sync.Mutex
// closed is used to signal that Stop has been called.
closed chan struct{}
}
var useStreamingPull = false
// newMessageIterator starts a new messageIterator. Stop must be called on the messageIterator
// when it is no longer needed. // when it is no longer needed.
// subName is the full name of the subscription to pull messages from. // subName is the full name of the subscription to pull messages from.
// ctx is the context to use for acking messages and extending message deadlines. // ctx is the context to use for acking messages and extending message deadlines.
func newMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *messageIterator { func newMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *streamingMessageIterator {
if !useStreamingPull {
return &messageIterator{
impl: newPollingMessageIterator(ctx, s, subName, po),
}
}
sp := s.newStreamingPuller(ctx, subName, int32(po.ackDeadline.Seconds())) sp := s.newStreamingPuller(ctx, subName, int32(po.ackDeadline.Seconds()))
err := sp.open() _ = sp.open() // error stored in sp
if grpc.Code(err) == codes.Unimplemented { return newStreamingMessageIterator(ctx, sp, po)
log.Println("pubsub: streaming pull unimplemented; falling back to legacy pull")
return &messageIterator{
impl: newPollingMessageIterator(ctx, s, subName, po),
}
}
// TODO(jba): handle other non-nil error?
log.Println("using streaming pull")
return &messageIterator{
impl: newStreamingMessageIterator(ctx, sp, po),
}
}
func newPollingMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *pollingMessageIterator {
// TODO: make kaTicker frequency more configurable.
// (ackDeadline - 5s) is a reasonable default for now, because the minimum ack period is 10s. This gives us 5s grace.
keepAlivePeriod := po.ackDeadline - 5*time.Second
kaTicker := time.NewTicker(keepAlivePeriod) // Stopped in it.Stop
// Ack promptly so users don't lose work if client crashes.
ackTicker := time.NewTicker(100 * time.Millisecond) // Stopped in it.Stop
ka := &keepAlive{
s: s,
Ctx: ctx,
Sub: subName,
ExtensionTick: kaTicker.C,
Deadline: po.ackDeadline,
MaxExtension: po.maxExtension,
}
ack := &acker{
s: s,
Ctx: ctx,
Sub: subName,
AckTick: ackTicker.C,
Notify: ka.Remove,
}
nacker := bundler.NewBundler("", func(ackIDs interface{}) {
// NACK by setting the ack deadline to zero, to make the message
// immediately available for redelivery.
//
// If the RPC fails, nothing we can do about it. In the worst case, the
// deadline for these messages will expire and they will still get
// redelivered.
_ = s.modifyAckDeadline(ctx, subName, 0, ackIDs.([]string))
})
nacker.DelayThreshold = 100 * time.Millisecond // nack promptly
nacker.BundleCountThreshold = 10
pull := newPuller(s, subName, ctx, po.maxPrefetch, ka.Add, ka.Remove)
ka.Start()
ack.Start()
return &pollingMessageIterator{
kaTicker: kaTicker,
ackTicker: ackTicker,
ka: ka,
acker: ack,
nacker: nacker,
puller: pull,
closed: make(chan struct{}),
}
}
// Next returns the next Message to be processed. The caller must call
// Message.Done when finished with it.
// Once Stop has been called, calls to Next will return iterator.Done.
func (it *messageIterator) Next() (*Message, error) {
return it.impl.next()
}
func (it *pollingMessageIterator) next() (*Message, error) {
m, err := it.puller.Next()
if err == nil {
m.doneFunc = it.done
return m, nil
}
select {
// If Stop has been called, we return Done regardless the value of err.
case <-it.closed:
return nil, iterator.Done
default:
return nil, err
}
}
// Client code must call Stop on a messageIterator when finished with it.
// Stop will block until Done has been called on all Messages that have been
// returned by Next, or until the context with which the messageIterator was created
// is cancelled or exceeds its deadline.
// Stop need only be called once, but may be called multiple times from
// multiple goroutines.
func (it *messageIterator) Stop() {
it.impl.stop()
}
func (it *pollingMessageIterator) stop() {
it.mu.Lock()
defer it.mu.Unlock()
select {
case <-it.closed:
// Cleanup has already been performed.
return
default:
}
// We close this channel before calling it.puller.Stop to ensure that we
// reliably return iterator.Done from Next.
close(it.closed)
// Stop the puller. Once this completes, no more messages will be added
// to it.ka.
it.puller.Stop()
// Start acking messages as they arrive, ignoring ackTicker. This will
// result in it.ka.Stop, below, returning as soon as possible.
it.acker.FastMode()
// This will block until
// (a) it.ka.Ctx is done, or
// (b) all messages have been removed from keepAlive.
// (b) will happen once all outstanding messages have been either ACKed or NACKed.
it.ka.Stop()
// There are no more live messages, so kill off the acker.
it.acker.Stop()
it.nacker.Flush()
it.kaTicker.Stop()
it.ackTicker.Stop()
}
func (it *pollingMessageIterator) done(ackID string, ack bool) {
if ack {
it.acker.Ack(ackID)
// There's no need to call it.ka.Remove here, as acker will
// call it via its Notify function.
} else {
it.ka.Remove(ackID)
_ = it.nacker.Add(ackID, len(ackID)) // ignore error; this is just an optimization
}
} }
type streamingMessageIterator struct { type streamingMessageIterator struct {
@ -222,7 +42,6 @@ type streamingMessageIterator struct {
failed chan struct{} // closed on stream error failed chan struct{} // closed on stream error
stopped chan struct{} // closed when Stop is called stopped chan struct{} // closed when Stop is called
drained chan struct{} // closed when stopped && no more pending messages drained chan struct{} // closed when stopped && no more pending messages
msgc chan *Message
wg sync.WaitGroup wg sync.WaitGroup
mu sync.Mutex mu sync.Mutex
@ -242,104 +61,36 @@ func newStreamingMessageIterator(ctx context.Context, sp *streamingPuller, po *p
ackTicker := time.NewTicker(100 * time.Millisecond) ackTicker := time.NewTicker(100 * time.Millisecond)
nackTicker := time.NewTicker(100 * time.Millisecond) nackTicker := time.NewTicker(100 * time.Millisecond)
it := &streamingMessageIterator{ it := &streamingMessageIterator{
ctx: ctx, ctx: ctx,
sp: sp, sp: sp,
po: po, po: po,
kaTicker: kaTicker, kaTicker: kaTicker,
ackTicker: ackTicker, ackTicker: ackTicker,
nackTicker: nackTicker, nackTicker: nackTicker,
failed: make(chan struct{}), failed: make(chan struct{}),
stopped: make(chan struct{}), stopped: make(chan struct{}),
drained: make(chan struct{}), drained: make(chan struct{}),
// use maxPrefetch as the channel's buffer size.
msgc: make(chan *Message, po.maxPrefetch),
keepAliveDeadlines: map[string]time.Time{}, keepAliveDeadlines: map[string]time.Time{},
pendingReq: &pb.StreamingPullRequest{}, pendingReq: &pb.StreamingPullRequest{},
} }
it.wg.Add(2) it.wg.Add(1)
go it.receiver()
go it.sender() go it.sender()
return it return it
} }
func (it *streamingMessageIterator) next() (*Message, error) { // Subscription.receive will call stop on its messageIterator when finished with it.
// If ctx has been cancelled or the iterator is done, return straight // Stop will block until Done has been called on all Messages that have been
// away (even if there are buffered messages available). // returned by Next, or until the context with which the messageIterator was created
select { // is cancelled or exceeds its deadline.
case <-it.ctx.Done():
return nil, it.ctx.Err()
case <-it.failed:
break
case <-it.stopped:
break
default:
// Wait for a message, but also for one of the above conditions.
select {
case msg := <-it.msgc:
// Since active select cases are chosen at random, this can return
// nil (from the channel close) even if it.failed or it.stopped is
// closed.
if msg == nil {
break
}
msg.doneFunc = it.done
return msg, nil
case <-it.ctx.Done():
return nil, it.ctx.Err()
case <-it.failed:
break
case <-it.stopped:
break
}
}
// Here if the iterator is done.
it.mu.Lock()
defer it.mu.Unlock()
return nil, it.err
}
func (it *streamingMessageIterator) stop() { func (it *streamingMessageIterator) stop() {
it.mu.Lock() it.mu.Lock()
select { select {
case <-it.stopped: case <-it.stopped:
it.mu.Unlock()
it.wg.Wait()
return
default: default:
close(it.stopped) close(it.stopped)
} }
if it.err == nil {
it.err = iterator.Done
}
// Before reading from the channel, see if we're already drained.
it.checkDrained() it.checkDrained()
it.mu.Unlock() it.mu.Unlock()
// Nack all the pending messages.
// Grab the lock separately for each message to allow the receiver
// and sender goroutines to make progress.
// Why this will eventually terminate:
// - If the receiver is not blocked on a stream Recv, then
// it will write all the messages it has received to the channel,
// then exit, closing the channel.
// - If the receiver is blocked, then this loop will eventually
// nack all the messages in the channel. Once done is called
// on the remaining messages, the iterator will be marked as drained,
// which will trigger the sender to terminate. When it does, it
// performs a CloseSend on the stream, which will result in the blocked
// stream Recv returning.
for m := range it.msgc {
it.mu.Lock()
delete(it.keepAliveDeadlines, m.ackID)
it.addDeadlineMod(m.ackID, 0)
it.checkDrained()
it.mu.Unlock()
}
it.wg.Wait() it.wg.Wait()
} }
@ -394,52 +145,40 @@ func (it *streamingMessageIterator) fail(err error) {
it.mu.Unlock() it.mu.Unlock()
} }
// receiver runs in a goroutine and handles all receives from the stream. // receive makes a call to the stream's Recv method and returns
func (it *streamingMessageIterator) receiver() { // its messages.
defer it.wg.Done() func (it *streamingMessageIterator) receive() ([]*Message, error) {
defer close(it.msgc) // Stop retrieving messages if the context is done, the stream
for { // failed, or the iterator's Stop method was called.
// Stop retrieving messages if the context is done, the stream select {
// failed, or the iterator's Stop method was called. case <-it.ctx.Done():
select { return nil, it.ctx.Err()
case <-it.ctx.Done(): default:
return
case <-it.failed:
return
case <-it.stopped:
return
default:
}
// Receive messages from stream. This may block indefinitely.
msgs, err := it.sp.fetchMessages()
// The streamingPuller handles retries, so any error here
// is fatal to the iterator.
if err != nil {
it.fail(err)
return
}
// We received some messages. Remember them so we can
// keep them alive.
deadline := time.Now().Add(it.po.maxExtension)
it.mu.Lock()
for _, m := range msgs {
it.keepAliveDeadlines[m.ackID] = deadline
}
it.mu.Unlock()
// Deliver the messages to the channel.
for _, m := range msgs {
select {
case <-it.ctx.Done():
return
case <-it.failed:
return
// Don't return if stopped. We want to send the remaining
// messages on the channel, where they will be nacked.
case it.msgc <- m:
}
}
} }
it.mu.Lock()
err := it.err
it.mu.Unlock()
if err != nil {
return nil, err
}
// Receive messages from stream. This may block indefinitely.
msgs, err := it.sp.fetchMessages()
// The streamingPuller handles retries, so any error here
// is fatal.
if err != nil {
it.fail(err)
return nil, err
}
// We received some messages. Remember them so we can
// keep them alive.
deadline := time.Now().Add(it.po.maxExtension)
it.mu.Lock()
for _, m := range msgs {
m.doneFunc = it.done
it.keepAliveDeadlines[m.ackID] = deadline
}
it.mu.Unlock()
return msgs, nil
} }
// sender runs in a goroutine and handles all sends to the stream. // sender runs in a goroutine and handles all sends to the stream.
@ -518,3 +257,15 @@ func (it *streamingMessageIterator) handleKeepAlives() bool {
it.checkDrained() it.checkDrained()
return len(live) > 0 return len(live) > 0
} }
func getKeepAliveAckIDs(items map[string]time.Time) (live, expired []string) {
now := time.Now()
for id, expiry := range items {
if expiry.Before(now) {
expired = append(expired, id)
} else {
live = append(live, id)
}
}
return live, expired
}
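As a quick illustration of the keep-alive partitioning added above, the standalone sketch below feeds getKeepAliveAckIDs a deadline map; the function body is copied from the code above, while the surrounding main, the ack IDs, and the chosen deadlines are illustrative assumptions only.
package main
import (
	"fmt"
	"time"
)
// getKeepAliveAckIDs splits the tracked ack IDs into those whose keep-alive
// deadline is still in the future (live) and those whose deadline has passed
// (expired), exactly as in the iterator code above.
func getKeepAliveAckIDs(items map[string]time.Time) (live, expired []string) {
	now := time.Now()
	for id, expiry := range items {
		if expiry.Before(now) {
			expired = append(expired, id)
		} else {
			live = append(live, id)
		}
	}
	return live, expired
}
func main() {
	now := time.Now()
	deadlines := map[string]time.Time{
		"ack-1": now.Add(30 * time.Second), // still within its max extension
		"ack-2": now.Add(-time.Second),     // already past its deadline
	}
	live, expired := getKeepAliveAckIDs(deadlines)
	fmt.Println("live:", live, "expired:", expired)
}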

View file

@ -1,338 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"fmt"
"reflect"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
func TestReturnsDoneOnStop(t *testing.T) {
if useStreamingPull {
t.Skip("iterator tests are for polling pull only")
}
type testCase struct {
abort func(*messageIterator, context.CancelFunc)
want error
}
for _, tc := range []testCase{
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
it.Stop()
},
want: iterator.Done,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
cancel()
},
want: context.Canceled,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
it.Stop()
cancel()
},
want: iterator.Done,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
cancel()
it.Stop()
},
want: iterator.Done,
},
} {
s := &blockingFetch{}
ctx, cancel := context.WithCancel(context.Background())
it := newMessageIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: time.Hour})
defer it.Stop()
tc.abort(it, cancel)
_, err := it.Next()
if err != tc.want {
t.Errorf("iterator Next error after abort: got:\n%v\nwant:\n%v", err, tc.want)
}
}
}
// blockingFetch implements message fetching by not returning until its context is cancelled.
type blockingFetch struct {
service
}
func (s *blockingFetch) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) {
<-ctx.Done()
return nil, ctx.Err()
}
func (s *blockingFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller {
return nil
}
// justInTimeFetch simulates the situation where the iterator is aborted just after the fetch RPC
// succeeds, so the rest of puller.Next will continue to execute and return successfully.
type justInTimeFetch struct {
service
}
func (s *justInTimeFetch) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) {
<-ctx.Done()
// The context was cancelled, but let's pretend that this happened just after our RPC returned.
var result []*Message
for i := 0; i < int(maxMessages); i++ {
val := fmt.Sprintf("msg%v", i)
result = append(result, &Message{Data: []byte(val), ackID: val})
}
return result, nil
}
func (s *justInTimeFetch) splitAckIDs(ids []string) ([]string, []string) {
return nil, nil
}
func (s *justInTimeFetch) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
return nil
}
func (s *justInTimeFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller {
return nil
}
func TestAfterAbortReturnsNoMoreThanOneMessage(t *testing.T) {
// Each test case is exercised by making two concurrent blocking calls on a
// messageIterator, and then aborting the iterator.
// The result should be one call to Next returning a message, and the other returning an error.
t.Skip(`This test has subtle timing dependencies, making it flaky.
It is not worth fixing because iterators will be removed shortly.`)
type testCase struct {
abort func(*messageIterator, context.CancelFunc)
// want is the error that should be returned from one Next invocation.
want error
}
for n := 1; n < 3; n++ {
for _, tc := range []testCase{
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
it.Stop()
},
want: iterator.Done,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
cancel()
},
want: context.Canceled,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
it.Stop()
cancel()
},
want: iterator.Done,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
cancel()
it.Stop()
},
want: iterator.Done,
},
} {
s := &justInTimeFetch{}
ctx, cancel := context.WithCancel(context.Background())
// if maxPrefetch == 1, there will be no messages in the puller buffer when Next is invoked the second time.
// if maxPrefetch == 2, there will be 1 message in the puller buffer when Next is invoked the second time.
po := &pullOptions{
ackDeadline: time.Second * 10,
maxExtension: time.Hour,
maxPrefetch: int32(n),
}
it := newMessageIterator(ctx, s, "subname", po)
defer it.Stop()
type result struct {
m *Message
err error
}
results := make(chan *result, 2)
for i := 0; i < 2; i++ {
go func() {
m, err := it.Next()
results <- &result{m, err}
if err == nil {
m.Nack()
}
}()
}
// Wait for goroutines to block on it.Next().
time.Sleep(50 * time.Millisecond)
tc.abort(it, cancel)
result1 := <-results
result2 := <-results
// There should be one error result, and one non-error result.
// Make result1 be the non-error result.
if result1.err != nil {
result1, result2 = result2, result1
}
if string(result1.m.Data) != "msg0" {
t.Errorf("After abort, got message: %v, want %v", result1.m.Data, "msg0")
}
if result1.err != nil {
t.Errorf("After abort, got : %v, want nil", result1.err)
}
if result2.m != nil {
t.Errorf("After abort, got message: %v, want nil", result2.m)
}
if result2.err != tc.want {
t.Errorf("After abort, got err: %v, want %v", result2.err, tc.want)
}
}
}
}
type fetcherServiceWithModifyAckDeadline struct {
fetcherService
events chan string
}
func (f *fetcherServiceWithModifyAckDeadline) modifyAckDeadline(_ context.Context, _ string, d time.Duration, ids []string) error {
// Different versions of Go use different representations for time.Duration(0).
var ds string
if d == 0 {
ds = "0s"
} else {
ds = d.String()
}
f.events <- fmt.Sprintf("modAck(%v, %s)", ids, ds)
return nil
}
func (f *fetcherServiceWithModifyAckDeadline) splitAckIDs(ackIDs []string) ([]string, []string) {
return ackIDs, nil
}
func (f *fetcherServiceWithModifyAckDeadline) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller {
return nil
}
func TestMultipleStopCallsBlockUntilMessageDone(t *testing.T) {
t.Skip(`This test has subtle timing dependencies, making it flaky.
It is not worth fixing because iterators will be removed shortly.`)
events := make(chan string, 3)
s := &fetcherServiceWithModifyAckDeadline{
fetcherService{
results: []fetchResult{
{
msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
},
},
},
events,
}
ctx := context.Background()
it := newMessageIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: 0})
m, err := it.Next()
if err != nil {
t.Errorf("error calling Next: %v", err)
}
go func() {
it.Stop()
events <- "stopped"
}()
go func() {
it.Stop()
events <- "stopped"
}()
select {
case <-events:
t.Fatal("Stop is not blocked")
case <-time.After(100 * time.Millisecond):
}
m.Nack()
got := []string{<-events, <-events, <-events}
want := []string{"modAck([a], 0s)", "stopped", "stopped"}
if !reflect.DeepEqual(got, want) {
t.Errorf("stopping iterator, got: %v ; want: %v", got, want)
}
// The iterator is stopped, so should not return another message.
m, err = it.Next()
if m != nil {
t.Errorf("message got: %v ; want: nil", m)
}
if err != iterator.Done {
t.Errorf("err got: %v ; want: %v", err, iterator.Done)
}
}
func TestFastNack(t *testing.T) {
if useStreamingPull {
t.Skip("iterator tests are for polling pull only")
}
events := make(chan string, 3)
s := &fetcherServiceWithModifyAckDeadline{
fetcherService{
results: []fetchResult{
{
msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
},
},
},
events,
}
ctx := context.Background()
it := newMessageIterator(ctx, s, "subname", &pullOptions{
ackDeadline: time.Second * 6,
maxExtension: time.Second * 10,
})
// Get both messages.
_, err := it.Next()
if err != nil {
t.Errorf("error calling Next: %v", err)
}
m2, err := it.Next()
if err != nil {
t.Errorf("error calling Next: %v", err)
}
// Ignore the first, nack the second.
m2.Nack()
got := []string{<-events, <-events}
// The nack should happen before the deadline extension.
want := []string{"modAck([b], 0s)", "modAck([a], 6s)"}
if !reflect.DeepEqual(got, want) {
t.Errorf("got: %v ; want: %v", got, want)
}
}

View file

@ -1,182 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"sync"
"time"
"golang.org/x/net/context"
)
// keepAlive keeps track of which Messages need to have their deadline extended, and
// periodically extends them.
// Messages are tracked by Ack ID.
type keepAlive struct {
s service
Ctx context.Context // The context to use when extending deadlines.
Sub string // The full name of the subscription.
ExtensionTick <-chan time.Time // ExtensionTick supplies the frequency with which to make extension requests.
Deadline time.Duration // How long to extend messages for each time they are extended. Should be greater than ExtensionTick frequency.
MaxExtension time.Duration // How long to keep extending each message's ack deadline before automatically removing it.
mu sync.Mutex
// key: ackID; value: time at which ack deadline extension should cease.
items map[string]time.Time
dr drain
wg sync.WaitGroup
}
// Start initiates the deadline extension loop. Stop must be called once keepAlive is no longer needed.
func (ka *keepAlive) Start() {
ka.items = make(map[string]time.Time)
ka.dr = drain{Drained: make(chan struct{})}
ka.wg.Add(1)
go func() {
defer ka.wg.Done()
for {
select {
case <-ka.Ctx.Done():
// Don't bother waiting for items to be removed: we can't extend them any more.
return
case <-ka.dr.Drained:
return
case <-ka.ExtensionTick:
live, expired := ka.getAckIDs()
ka.wg.Add(1)
go func() {
defer ka.wg.Done()
ka.extendDeadlines(live)
}()
for _, id := range expired {
ka.Remove(id)
}
}
}
}()
}
// Add adds an ack id to be kept alive.
// It should not be called after Stop.
func (ka *keepAlive) Add(ackID string) {
ka.mu.Lock()
defer ka.mu.Unlock()
ka.items[ackID] = time.Now().Add(ka.MaxExtension)
ka.dr.SetPending(true)
}
// Remove removes ackID from the list to be kept alive.
func (ka *keepAlive) Remove(ackID string) {
ka.mu.Lock()
defer ka.mu.Unlock()
// Note: If a user nacks a message after it has been removed due to
// expiring, Remove will be called twice with the same ack ID. This is OK.
delete(ka.items, ackID)
ka.dr.SetPending(len(ka.items) != 0)
}
// Stop waits until all added ackIDs have been removed, and cleans up resources.
// Stop may only be called once.
func (ka *keepAlive) Stop() {
ka.mu.Lock()
ka.dr.Drain()
ka.mu.Unlock()
ka.wg.Wait()
}
// getAckIDs returns the set of ackIDs that are being kept alive.
// The set is divided into two lists: one with IDs that should continue to be kept alive,
// and the other with IDs that should be dropped.
func (ka *keepAlive) getAckIDs() (live, expired []string) {
ka.mu.Lock()
defer ka.mu.Unlock()
return getKeepAliveAckIDs(ka.items)
}
func getKeepAliveAckIDs(items map[string]time.Time) (live, expired []string) {
now := time.Now()
for id, expiry := range items {
if expiry.Before(now) {
expired = append(expired, id)
} else {
live = append(live, id)
}
}
return live, expired
}
const maxExtensionAttempts = 2
func (ka *keepAlive) extendDeadlines(ackIDs []string) {
head, tail := ka.s.splitAckIDs(ackIDs)
for len(head) > 0 {
for i := 0; i < maxExtensionAttempts; i++ {
if ka.s.modifyAckDeadline(ka.Ctx, ka.Sub, ka.Deadline, head) == nil {
break
}
}
// NOTE: Messages whose deadlines we fail to extend will
// eventually be redelivered and this is a documented behaviour
// of the API.
//
// NOTE: If we fail to extend deadlines here, this
// implementation will continue to attempt extending the
// deadlines for those ack IDs the next time the extension
// ticker ticks. By then the deadline will have expired.
// Re-extending them is harmless, however.
//
// TODO: call Remove for ids which fail to be extended.
head, tail = ka.s.splitAckIDs(tail)
}
}
// A drain (once started) indicates via a channel when there is no work pending.
type drain struct {
started bool
pending bool
// Drained is closed once there are no items outstanding if Drain has been called.
Drained chan struct{}
}
// Drain starts the drain process. This cannot be undone.
func (d *drain) Drain() {
d.started = true
d.closeIfDrained()
}
// SetPending sets whether there is work pending or not. It may be called multiple times before or after Drain.
func (d *drain) SetPending(pending bool) {
d.pending = pending
d.closeIfDrained()
}
func (d *drain) closeIfDrained() {
if !d.pending && d.started {
// Check to see if d.Drained is closed before closing it.
// This allows SetPending(false) to be safely called multiple times.
select {
case <-d.Drained:
default:
close(d.Drained)
}
}
}
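For context, a rough sketch of how this now-deleted keepAlive type was driven from inside the pubsub package; the service value (svc), subscription name, and ticker interval here are assumptions for illustration, not part of this commit:

// svc is assumed to implement the package's internal service interface.
ka := &keepAlive{
    s:             svc,
    Ctx:           ctx,
    Sub:           "projects/my-project/subscriptions/my-sub",
    ExtensionTick: time.NewTicker(5 * time.Second).C,
    Deadline:      10 * time.Second,
    MaxExtension:  time.Hour,
}
ka.Start()
ka.Add("some-ack-id")    // begin extending this message's deadline
// ... the message is acked or nacked elsewhere ...
ka.Remove("some-ack-id") // stop extending it, letting Stop drain
ka.Stop()                // blocks until every added ID has been removed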

View file

@ -1,319 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"reflect"
"sort"
"testing"
"time"
"golang.org/x/net/context"
)
func TestKeepAliveExtendsDeadline(t *testing.T) {
ticker := make(chan time.Time)
deadline := time.Nanosecond * 15
s := &testService{modDeadlineCalled: make(chan modDeadlineCall)}
checkModDeadlineCall := func(ackIDs []string) {
got := <-s.modDeadlineCalled
sort.Strings(got.ackIDs)
want := modDeadlineCall{
subName: "subname",
deadline: deadline,
ackIDs: ackIDs,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("keepalive: got:\n%v\nwant:\n%v", got, want)
}
}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
Sub: "subname",
ExtensionTick: ticker,
Deadline: deadline,
MaxExtension: time.Hour,
}
ka.Start()
ka.Add("a")
ka.Add("b")
ticker <- time.Time{}
checkModDeadlineCall([]string{"a", "b"})
ka.Add("c")
ka.Remove("b")
ticker <- time.Time{}
checkModDeadlineCall([]string{"a", "c"})
ka.Remove("a")
ka.Remove("c")
ka.Add("d")
ticker <- time.Time{}
checkModDeadlineCall([]string{"d"})
ka.Remove("d")
ka.Stop()
}
func TestKeepAliveStopsWhenNoItem(t *testing.T) {
ticker := make(chan time.Time)
stopped := make(chan bool)
s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 3)}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
ExtensionTick: ticker,
}
ka.Start()
// There should be no call to modifyAckDeadline since there is no item.
ticker <- time.Time{}
go func() {
ka.Stop() // No items; should not block
if len(s.modDeadlineCalled) > 0 {
t.Errorf("unexpected extension to non-existent items: %v", <-s.modDeadlineCalled)
}
close(stopped)
}()
select {
case <-stopped:
case <-time.After(time.Second):
t.Errorf("keepAlive timed out waiting for stop")
}
}
func TestKeepAliveStopsWhenItemsExpired(t *testing.T) {
ticker := make(chan time.Time)
stopped := make(chan bool)
s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 2)}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
ExtensionTick: ticker,
MaxExtension: time.Duration(0), // Should expire items at the first tick.
}
ka.Start()
ka.Add("a")
ka.Add("b")
// Wait until the clock advances. Without this loop, this test fails on
// Windows because the clock doesn't advance at all between ka.Add and the
// expiration check after the tick is received.
begin := time.Now()
for time.Now().Equal(begin) {
time.Sleep(time.Millisecond)
}
// There should be no call to modifyAckDeadline since both items are expired.
ticker <- time.Time{}
go func() {
ka.Stop() // No live items; should not block.
if len(s.modDeadlineCalled) > 0 {
t.Errorf("unexpected extension to expired items")
}
close(stopped)
}()
select {
case <-stopped:
case <-time.After(time.Second):
t.Errorf("timed out waiting for stop")
}
}
func TestKeepAliveBlocksUntilAllItemsRemoved(t *testing.T) {
ticker := make(chan time.Time)
eventc := make(chan string, 3)
s := &testService{modDeadlineCalled: make(chan modDeadlineCall)}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
ExtensionTick: ticker,
MaxExtension: time.Hour, // Should not expire.
}
ka.Start()
ka.Add("a")
ka.Add("b")
go func() {
ticker <- time.Time{}
// We expect a call since both items should be extended.
select {
case args := <-s.modDeadlineCalled:
sort.Strings(args.ackIDs)
got := args.ackIDs
want := []string{"a", "b"}
if !reflect.DeepEqual(got, want) {
t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want)
}
case <-time.After(time.Second):
t.Errorf("timed out waiting for deadline extend call")
}
time.Sleep(10 * time.Millisecond)
eventc <- "pre-remove-b"
// Remove one item, Stop should still be waiting.
ka.Remove("b")
ticker <- time.Time{}
// We expect a call since the item is still alive.
select {
case args := <-s.modDeadlineCalled:
got := args.ackIDs
want := []string{"a"}
if !reflect.DeepEqual(got, want) {
t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want)
}
case <-time.After(time.Second):
t.Errorf("timed out waiting for deadline extend call")
}
time.Sleep(10 * time.Millisecond)
eventc <- "pre-remove-a"
// Remove the last item so that Stop can proceed.
ka.Remove("a")
}()
go func() {
ka.Stop() // Should block until all items are removed.
eventc <- "post-stop"
}()
for i, want := range []string{"pre-remove-b", "pre-remove-a", "post-stop"} {
select {
case got := <-eventc:
if got != want {
t.Errorf("event #%d:\ngot %v\nwant %v", i, got, want)
}
case <-time.After(time.Second):
t.Errorf("time out waiting for #%d event: want %v", i, want)
}
}
}
// extendCallResult contains a list of ackIDs which are expected in an ackID
// extension request, along with the result that should be returned.
type extendCallResult struct {
ackIDs []string
err error
}
// extendService implements modifyAckDeadline using a hard-coded list of extendCallResults.
type extendService struct {
service
calls []extendCallResult
t *testing.T // used for error logging.
}
func (es *extendService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
if len(es.calls) == 0 {
es.t.Fatalf("unexpected call to modifyAckDeadline: ackIDs: %v", ackIDs)
}
call := es.calls[0]
es.calls = es.calls[1:]
if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) {
es.t.Errorf("unexpected arguments to modifyAckDeadline: got: %v ; want: %v", got, want)
}
return call.err
}
// Test implementation returns the first 2 elements as head, and the rest as tail.
func (es *extendService) splitAckIDs(ids []string) ([]string, []string) {
if len(ids) < 2 {
return ids, nil
}
return ids[:2], ids[2:]
}
func TestKeepAliveSplitsBatches(t *testing.T) {
type testCase struct {
calls []extendCallResult
}
for _, tc := range []testCase{
{
calls: []extendCallResult{
{
ackIDs: []string{"a", "b"},
},
{
ackIDs: []string{"c", "d"},
},
{
ackIDs: []string{"e", "f"},
},
},
},
{
calls: []extendCallResult{
{
ackIDs: []string{"a", "b"},
err: errors.New("bang"),
},
// On error we retry once.
{
ackIDs: []string{"a", "b"},
err: errors.New("bang"),
},
// We give up after failing twice, so we move on to the next set, "c" and "d".
{
ackIDs: []string{"c", "d"},
err: errors.New("bang"),
},
// Again, we retry once.
{
ackIDs: []string{"c", "d"},
},
{
ackIDs: []string{"e", "f"},
},
},
},
} {
s := &extendService{
t: t,
calls: tc.calls,
}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
Sub: "subname",
}
ka.extendDeadlines([]string{"a", "b", "c", "d", "e", "f"})
if len(s.calls) != 0 {
t.Errorf("expected extend calls did not occur: %v", s.calls)
}
}
}

View file

@ -102,7 +102,11 @@ func perfClient(pubDelay time.Duration, nConns int, f interface {
}
conn, err := gtransport.DialInsecure(ctx,
option.WithEndpoint(srv.Addr),
-option.WithGRPCConnectionPool(nConns))
+option.WithGRPCConnectionPool(nConns),
+// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
+// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
+option.WithGRPCDialOption(grpc.WithBlock()))
if err != nil {
f.Fatal(err)
}

View file

@ -62,6 +62,10 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
o = []option.ClientOption{
// Create multiple connections to increase throughput.
option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)),
+// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
+// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
+option.WithGRPCDialOption(grpc.WithBlock()),
}
}
o = append(o, opts...)
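For reference, a minimal sketch of constructing the client whose default dial options this hunk extends; the project ID is a placeholder:

package main

import (
    "log"

    "cloud.google.com/go/pubsub"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    // By default NewClient applies the connection-pool and WithBlock
    // options shown above before appending any caller-supplied options.
    client, err := pubsub.NewClient(ctx, "my-project")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
}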

View file

@ -1,115 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"sync"
"golang.org/x/net/context"
)
// puller fetches messages from the server in a batch.
type puller struct {
ctx context.Context
cancel context.CancelFunc
// keepAlive takes ownership of the lifetime of the message identified
// by ackID, ensuring that its ack deadline does not expire. It should
// be called each time a new message is fetched from the server, even
// if it is not yet returned from Next.
keepAlive func(ackID string)
// abandon should be called for each message which has previously been
// passed to keepAlive, but will never be returned by Next.
abandon func(ackID string)
// fetch fetches a batch of messages from the server.
fetch func() ([]*Message, error)
mu sync.Mutex
buf []*Message
}
// newPuller constructs a new puller.
// batchSize is the maximum number of messages to fetch at once.
// No more than batchSize messages will be outstanding at any time.
func newPuller(s service, subName string, ctx context.Context, batchSize int32, keepAlive, abandon func(ackID string)) *puller {
ctx, cancel := context.WithCancel(ctx)
return &puller{
cancel: cancel,
keepAlive: keepAlive,
abandon: abandon,
ctx: ctx,
fetch: func() ([]*Message, error) { return s.fetchMessages(ctx, subName, batchSize) },
}
}
const maxPullAttempts = 2
// Next returns the next message from the server, fetching a new batch if necessary.
// keepAlive is called with the ackIDs of newly fetched messages.
// If p.Ctx has already been cancelled before Next is called, no new messages
// will be fetched.
func (p *puller) Next() (*Message, error) {
p.mu.Lock()
defer p.mu.Unlock()
// If ctx has been cancelled, return straight away (even if there are buffered messages available).
select {
case <-p.ctx.Done():
return nil, p.ctx.Err()
default:
}
for len(p.buf) == 0 {
var buf []*Message
var err error
for i := 0; i < maxPullAttempts; i++ {
// Once Stop has completed, all future calls to Next will immediately fail at this point.
buf, err = p.fetch()
if err == nil || err == context.Canceled || err == context.DeadlineExceeded {
break
}
}
if err != nil {
return nil, err
}
for _, m := range buf {
p.keepAlive(m.ackID)
}
p.buf = buf
}
m := p.buf[0]
p.buf = p.buf[1:]
return m, nil
}
// Stop aborts any pending calls to Next, and prevents any future ones from succeeding.
// Stop also abandons any messages that have been pre-fetched.
// Once Stop completes, no calls to Next will succeed.
func (p *puller) Stop() {
// Next may be executing in another goroutine. Cancel it, and then wait until it terminates.
p.cancel()
p.mu.Lock()
defer p.mu.Unlock()
for _, m := range p.buf {
p.abandon(m.ackID)
}
p.buf = nil
}
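For context, a rough in-package sketch of how this now-deleted puller was driven; svc, ka, and handle are stand-ins for illustration, not part of the commit:

// Fetch in batches of up to 32 messages, registering each ack ID with the
// keepAlive before it is returned, and abandoning buffered ones on Stop.
p := newPuller(svc, "projects/my-project/subscriptions/my-sub", ctx, 32, ka.Add, ka.Remove)
defer p.Stop()
for {
    m, err := p.Next()
    if err != nil {
        break // context cancelled, or the fetch failed twice
    }
    handle(m)
}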

View file

@ -1,154 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"reflect"
"testing"
"golang.org/x/net/context"
)
type fetchResult struct {
msgs []*Message
err error
}
type fetcherService struct {
service
results []fetchResult
unexpectedCall bool
}
func (s *fetcherService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) {
if len(s.results) == 0 {
s.unexpectedCall = true
return nil, errors.New("bang")
}
ret := s.results[0]
s.results = s.results[1:]
return ret.msgs, ret.err
}
func TestPuller(t *testing.T) {
s := &fetcherService{
results: []fetchResult{
{
msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
},
{},
{
msgs: []*Message{{ackID: "c"}, {ackID: "d"}},
},
{
msgs: []*Message{{ackID: "e"}},
},
},
}
pulled := make(chan string, 10)
pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {})
got := []string{}
for i := 0; i < 5; i++ {
m, err := pull.Next()
got = append(got, m.ackID)
if err != nil {
t.Errorf("unexpected err from pull.Next: %v", err)
}
}
_, err := pull.Next()
if err == nil {
t.Errorf("unexpected err from pull.Next: %v", err)
}
want := []string{"a", "b", "c", "d", "e"}
if !reflect.DeepEqual(got, want) {
t.Errorf("pulled ack ids: got: %v ; want: %v", got, want)
}
}
func TestPullerAddsToKeepAlive(t *testing.T) {
s := &fetcherService{
results: []fetchResult{
{
msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
},
{
msgs: []*Message{{ackID: "c"}, {ackID: "d"}},
},
},
}
pulled := make(chan string, 10)
pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {})
got := []string{}
for i := 0; i < 3; i++ {
m, err := pull.Next()
got = append(got, m.ackID)
if err != nil {
t.Errorf("unexpected err from pull.Next: %v", err)
}
}
want := []string{"a", "b", "c"}
if !reflect.DeepEqual(got, want) {
t.Errorf("pulled ack ids: got: %v ; want: %v", got, want)
}
close(pulled)
// We should have seen "d" written to the channel too, even though it hasn't been returned yet.
pulledIDs := []string{}
for id := range pulled {
pulledIDs = append(pulledIDs, id)
}
want = append(want, "d")
if !reflect.DeepEqual(pulledIDs, want) {
t.Errorf("pulled ack ids: got: %v ; want: %v", pulledIDs, want)
}
}
func TestPullerRetriesOnce(t *testing.T) {
bang := errors.New("bang")
s := &fetcherService{
results: []fetchResult{
{
err: bang,
},
{
err: bang,
},
},
}
pull := newPuller(s, "subname", context.Background(), 2, func(string) {}, func(string) {})
_, err := pull.Next()
if err != bang {
t.Errorf("pull.Next err got: %v, want: %v", err, bang)
}
if s.unexpectedCall {
t.Errorf("unexpected retry")
}
if len(s.results) != 0 {
t.Errorf("outstanding calls: got: %v, want: 0", len(s.results))
}
}

View file

@ -58,9 +58,6 @@ func TestStreamingPullMultipleFetches(t *testing.T) {
}
func testStreamingPullIteration(t *testing.T, client *Client, server *fakeServer, msgs []*pb.ReceivedMessage) {
-if !useStreamingPull {
-t.SkipNow()
-}
sub := client.Subscription("s")
gotMsgs, err := pullN(context.Background(), sub, len(msgs), func(_ context.Context, m *Message) {
id, err := strconv.Atoi(m.ackID)
@ -116,9 +113,6 @@ func TestStreamingPullError(t *testing.T) {
// If an RPC to the service returns a non-retryable error, Pull should
// return after all callbacks return, without waiting for messages to be
// acked.
-if !useStreamingPull {
-t.SkipNow()
-}
client, server := newFake(t)
server.addStreamingPullMessages(testMessages[:1])
server.addStreamingPullError(grpc.Errorf(codes.Unknown, ""))
@ -148,9 +142,6 @@ func TestStreamingPullError(t *testing.T) {
func TestStreamingPullCancel(t *testing.T) {
// If Receive's context is canceled, it should return after all callbacks
// return and all messages have been acked.
-if !useStreamingPull {
-t.SkipNow()
-}
client, server := newFake(t)
server.addStreamingPullMessages(testMessages)
sub := client.Subscription("s")
@ -171,9 +162,6 @@ func TestStreamingPullCancel(t *testing.T) {
}
func TestStreamingPullRetry(t *testing.T) {
-if !useStreamingPull {
-t.SkipNow()
-}
// Check that we retry on io.EOF or Unavailable.
client, server := newFake(t)
server.addStreamingPullMessages(testMessages[:1])
@ -189,9 +177,6 @@ func TestStreamingPullRetry(t *testing.T) {
func TestStreamingPullOneActive(t *testing.T) {
// Only one call to Pull can be active at a time.
-if !useStreamingPull {
-t.SkipNow()
-}
client, srv := newFake(t)
srv.addStreamingPullMessages(testMessages[:1])
sub := client.Subscription("s")
@ -210,9 +195,6 @@ func TestStreamingPullOneActive(t *testing.T) {
}
func TestStreamingPullConcurrent(t *testing.T) {
-if !useStreamingPull {
-t.SkipNow()
-}
newMsg := func(i int) *pb.ReceivedMessage {
return &pb.ReceivedMessage{
AckId: strconv.Itoa(i),
@ -249,9 +231,6 @@ func TestStreamingPullConcurrent(t *testing.T) {
func TestStreamingPullFlowControl(t *testing.T) {
// Callback invocations should not occur if flow control limits are exceeded.
-if !useStreamingPull {
-t.SkipNow()
-}
client, server := newFake(t)
server.addStreamingPullMessages(testMessages)
sub := client.Subscription("s")

View file

@ -17,6 +17,7 @@ package pubsub
import (
"errors"
"fmt"
+"io"
"strings"
"sync"
"time"
@ -24,7 +25,6 @@ import (
"cloud.google.com/go/iam"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
-"google.golang.org/api/iterator"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
@ -152,6 +152,12 @@ type ReceiveSettings struct {
// NumGoroutines is the number of goroutines Receive will spawn to pull
// messages concurrently. If NumGoroutines is less than 1, it will be treated
// as if it were DefaultReceiveSettings.NumGoroutines.
+//
+// NumGoroutines does not limit the number of messages that can be processed
+// concurrently. Even with one goroutine, many messages might be processed at
+// once, because that goroutine may continually receive messages and invoke the
+// function passed to Receive on them. To limit the number of messages being
+// processed concurrently, set MaxOutstandingMessages.
NumGoroutines int
}
@ -352,35 +358,40 @@ func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowCon
wg.Add(1)
go func() {
<-ctx2.Done()
-iter.Stop()
+iter.stop()
wg.Done()
}()
defer wg.Wait()
defer cancel()
for {
-msg, err := iter.Next()
-if err == iterator.Done {
+msgs, err := iter.receive()
+if err == io.EOF {
return nil
}
if err != nil {
return err
}
-// TODO(jba): call acquire closer to when the message is allocated.
-if err := fc.acquire(ctx, len(msg.Data)); err != nil {
-// TODO(jba): test that this "orphaned" message is nacked immediately when ctx is done.
-msg.Nack()
-return nil
-}
-wg.Add(1)
-go func() {
-// TODO(jba): call release when the message is available for GC.
-// This considers the message to be released when
-// f is finished, but f may ack early or not at all.
-defer wg.Done()
-defer fc.release(len(msg.Data))
-f(ctx2, msg)
-}()
+for i, msg := range msgs {
+msg := msg
+// TODO(jba): call acquire closer to when the message is allocated.
+if err := fc.acquire(ctx, len(msg.Data)); err != nil {
+// TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done.
+for _, m := range msgs[i:] {
+m.Nack()
+}
+return nil
+}
+wg.Add(1)
+go func() {
+// TODO(jba): call release when the message is available for GC.
+// This considers the message to be released when
+// f is finished, but f may ack early or not at all.
+defer wg.Done()
+defer fc.release(len(msg.Data))
+f(ctx2, msg)
+}()
+}
}
}
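For reference, a minimal sketch of the caller-facing side of this loop; the project and subscription IDs are placeholders, and the settings merely illustrate the new NumGoroutines doc comment (MaxOutstandingMessages, not NumGoroutines, bounds concurrent processing):

package main

import (
    "log"

    "cloud.google.com/go/pubsub"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    client, err := pubsub.NewClient(ctx, "my-project")
    if err != nil {
        log.Fatal(err)
    }
    sub := client.Subscription("my-sub")
    sub.ReceiveSettings.NumGoroutines = 2            // pulling parallelism only
    sub.ReceiveSettings.MaxOutstandingMessages = 100 // cap on messages being processed
    err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
        log.Printf("got %s", m.Data)
        m.Ack()
    })
    if err != nil {
        log.Fatal(err)
    }
}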

View file

@ -106,14 +106,24 @@ func (c *Client) CreateTopic(ctx context.Context, id string) (*Topic, error) {
return t, err
}
-// Topic creates a reference to a topic.
+// Topic creates a reference to a topic in the client's project.
//
// If a Topic's Publish method is called, it has background goroutines
// associated with it. Clean them up by calling Topic.Stop.
//
// Avoid creating many Topic instances if you use them to publish.
func (c *Client) Topic(id string) *Topic {
-return newTopic(c.s, fmt.Sprintf("projects/%s/topics/%s", c.projectID, id))
+return c.TopicInProject(id, c.projectID)
+}
+// TopicInProject creates a reference to a topic in the given project.
+//
+// If a Topic's Publish method is called, it has background goroutines
+// associated with it. Clean them up by calling Topic.Stop.
+//
+// Avoid creating many Topic instances if you use them to publish.
+func (c *Client) TopicInProject(id, projectID string) *Topic {
+return newTopic(c.s, fmt.Sprintf("projects/%s/topics/%s", projectID, id))
}
func newTopic(s service, name string) *Topic {
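A small sketch of the new cross-project reference, assuming a client and ctx constructed as in the earlier sketch, placeholder IDs, and this package version's non-blocking Publish API:

// Reference a topic that lives in another project and publish to it.
t := client.TopicInProject("my-topic", "other-project")
defer t.Stop() // clean up the background publish goroutines
res := t.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
if _, err := res.Get(ctx); err != nil {
    log.Fatal(err)
}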

View file

@ -83,4 +83,6 @@ for pkg in $(go list $prefix/...); do # for each package in the repo
fi
done
run go test -race -v -short $shorts
-run go test -race -v $fulls
+if [[ $fulls != "" ]]; then
+run go test -race -v $fulls
+fi

View file

@ -35,11 +35,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
databaseAdminInstancePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}")
databaseAdminDatabasePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}/databases/{database}")
)
// DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient. // DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient.
type DatabaseAdminCallOptions struct { type DatabaseAdminCallOptions struct {
ListDatabases []gax.CallOption ListDatabases []gax.CallOption
@ -126,7 +121,7 @@ func NewDatabaseAdminClient(ctx context.Context, opts ...option.ClientOption) (*
databaseAdminClient: databasepb.NewDatabaseAdminClient(conn), databaseAdminClient: databasepb.NewDatabaseAdminClient(conn),
} }
c.SetGoogleClientInfo() c.setGoogleClientInfo()
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil { if err != nil {
@ -152,10 +147,10 @@ func (c *DatabaseAdminClient) Close() error {
return c.conn.Close() return c.conn.Close()
} }
// SetGoogleClientInfo sets the name and version of the application in // setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for // the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients. // use by Google-written clients.
func (c *DatabaseAdminClient) SetGoogleClientInfo(keyval ...string) { func (c *DatabaseAdminClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -163,27 +158,24 @@ func (c *DatabaseAdminClient) SetGoogleClientInfo(keyval ...string) {
// DatabaseAdminInstancePath returns the path for the instance resource. // DatabaseAdminInstancePath returns the path for the instance resource.
func DatabaseAdminInstancePath(project, instance string) string { func DatabaseAdminInstancePath(project, instance string) string {
path, err := databaseAdminInstancePathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"instance": instance, project +
}) "/instances/" +
if err != nil { instance +
panic(err) ""
}
return path
} }
// DatabaseAdminDatabasePath returns the path for the database resource. // DatabaseAdminDatabasePath returns the path for the database resource.
func DatabaseAdminDatabasePath(project, instance, database string) string { func DatabaseAdminDatabasePath(project, instance, database string) string {
path, err := databaseAdminDatabasePathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"instance": instance, project +
"database": database, "/instances/" +
}) instance +
if err != nil { "/databases/" +
panic(err) database +
} ""
return path
} }
// ListDatabases lists Cloud Spanner databases. // ListDatabases lists Cloud Spanner databases.
@ -223,7 +215,7 @@ func (c *DatabaseAdminClient) ListDatabases(ctx context.Context, req *databasepb
// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving. // CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving.
// The returned [long-running operation][google.longrunning.Operation] will // The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format `<database_name>/operations/<operation_id>` and // have a name of the format <database_name>/operations/<operation_id> and
// can be used to track preparation of the database. The // can be used to track preparation of the database. The
// [metadata][google.longrunning.Operation.metadata] field type is // [metadata][google.longrunning.Operation.metadata] field type is
// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The
@ -265,7 +257,7 @@ func (c *DatabaseAdminClient) GetDatabase(ctx context.Context, req *databasepb.G
// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by // UpdateDatabaseDdl updates the schema of a Cloud Spanner database by
// creating/altering/dropping tables, columns, indexes, etc. The returned // creating/altering/dropping tables, columns, indexes, etc. The returned
// [long-running operation][google.longrunning.Operation] will have a name of // [long-running operation][google.longrunning.Operation] will have a name of
// the format `<database_name>/operations/<operation_id>` and can be used to // the format <database_name>/operations/<operation_id> and can be used to
// track execution of the schema change(s). The // track execution of the schema change(s). The
// [metadata][google.longrunning.Operation.metadata] field type is // [metadata][google.longrunning.Operation.metadata] field type is
// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response.
@ -319,7 +311,7 @@ func (c *DatabaseAdminClient) GetDatabaseDdl(ctx context.Context, req *databasep
// SetIamPolicy sets the access control policy on a database resource. Replaces any // SetIamPolicy sets the access control policy on a database resource. Replaces any
// existing policy. // existing policy.
// //
// Authorization requires `spanner.databases.setIamPolicy` permission on // Authorization requires spanner.databases.setIamPolicy permission on
// [resource][google.iam.v1.SetIamPolicyRequest.resource]. // [resource][google.iam.v1.SetIamPolicyRequest.resource].
func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
@ -339,7 +331,7 @@ func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIa
// GetIamPolicy gets the access control policy for a database resource. Returns an empty // GetIamPolicy gets the access control policy for a database resource. Returns an empty
// policy if a database exists but does not have a policy set. // policy if a database exists but does not have a policy set.
// //
// Authorization requires `spanner.databases.getIamPolicy` permission on // Authorization requires spanner.databases.getIamPolicy permission on
// [resource][google.iam.v1.GetIamPolicyRequest.resource]. // [resource][google.iam.v1.GetIamPolicyRequest.resource].
func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
@ -359,7 +351,7 @@ func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIa
// TestIamPermissions returns permissions that the caller has on the specified database resource. // TestIamPermissions returns permissions that the caller has on the specified database resource.
// //
// Attempting this RPC on a non-existent Cloud Spanner database will result in // Attempting this RPC on a non-existent Cloud Spanner database will result in
// a NOT_FOUND error if the user has `spanner.databases.list` permission on // a NOT_FOUND error if the user has spanner.databases.list permission on
// the containing Cloud Spanner instance. Otherwise returns an empty set of // the containing Cloud Spanner instance. Otherwise returns an empty set of
// permissions. // permissions.
func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
@ -437,7 +429,7 @@ func (c *DatabaseAdminClient) CreateDatabaseOperation(name string) *CreateDataba
// See documentation of Poll for error-handling information. // See documentation of Poll for error-handling information.
func (op *CreateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) { func (op *CreateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
var resp databasepb.Database var resp databasepb.Database
if err := op.lro.Wait(ctx, &resp, opts...); err != nil { if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
return nil, err return nil, err
} }
return &resp, nil return &resp, nil
@ -505,7 +497,7 @@ func (c *DatabaseAdminClient) UpdateDatabaseDdlOperation(name string) *UpdateDat
// //
// See documentation of Poll for error-handling information. // See documentation of Poll for error-handling information.
func (op *UpdateDatabaseDdlOperation) Wait(ctx context.Context, opts ...gax.CallOption) error { func (op *UpdateDatabaseDdlOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
return op.lro.Wait(ctx, nil, opts...) return op.lro.WaitWithInterval(ctx, nil, 45000*time.Millisecond, opts...)
} }
// Poll fetches the latest state of the long-running operation. // Poll fetches the latest state of the long-running operation.
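For illustration, what the simplified path helpers above evaluate to; the package alias and IDs are placeholders:

// Assuming the package is imported as database
// ("cloud.google.com/go/spanner/admin/database/apiv1").
name := database.DatabaseAdminDatabasePath("my-project", "my-instance", "my-db")
// name == "projects/my-project/instances/my-instance/databases/my-db"
_ = name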

View file

@ -31,8 +31,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
-// DefaultAuthScopes reports the authentication scopes required
-// by this package.
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View file

@ -31,8 +31,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
-// DefaultAuthScopes reports the authentication scopes required
-// by this package.
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View file

@ -35,12 +35,6 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
) )
var (
instanceAdminProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
instanceAdminInstanceConfigPathTemplate = gax.MustCompilePathTemplate("projects/{project}/instanceConfigs/{instance_config}")
instanceAdminInstancePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}")
)
// InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient. // InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient.
type InstanceAdminCallOptions struct { type InstanceAdminCallOptions struct {
ListInstanceConfigs []gax.CallOption ListInstanceConfigs []gax.CallOption
@ -145,7 +139,7 @@ func NewInstanceAdminClient(ctx context.Context, opts ...option.ClientOption) (*
instanceAdminClient: instancepb.NewInstanceAdminClient(conn), instanceAdminClient: instancepb.NewInstanceAdminClient(conn),
} }
c.SetGoogleClientInfo() c.setGoogleClientInfo()
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil { if err != nil {
@ -171,10 +165,10 @@ func (c *InstanceAdminClient) Close() error {
return c.conn.Close() return c.conn.Close()
} }
// SetGoogleClientInfo sets the name and version of the application in // setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for // the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients. // use by Google-written clients.
func (c *InstanceAdminClient) SetGoogleClientInfo(keyval ...string) { func (c *InstanceAdminClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...) kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)} c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -182,37 +176,30 @@ func (c *InstanceAdminClient) SetGoogleClientInfo(keyval ...string) {
// InstanceAdminProjectPath returns the path for the project resource. // InstanceAdminProjectPath returns the path for the project resource.
func InstanceAdminProjectPath(project string) string { func InstanceAdminProjectPath(project string) string {
path, err := instanceAdminProjectPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
}) project +
if err != nil { ""
panic(err)
}
return path
} }
// InstanceAdminInstanceConfigPath returns the path for the instance config resource. // InstanceAdminInstanceConfigPath returns the path for the instance config resource.
func InstanceAdminInstanceConfigPath(project, instanceConfig string) string { func InstanceAdminInstanceConfigPath(project, instanceConfig string) string {
path, err := instanceAdminInstanceConfigPathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"instance_config": instanceConfig, project +
}) "/instanceConfigs/" +
if err != nil { instanceConfig +
panic(err) ""
}
return path
} }
// InstanceAdminInstancePath returns the path for the instance resource. // InstanceAdminInstancePath returns the path for the instance resource.
func InstanceAdminInstancePath(project, instance string) string { func InstanceAdminInstancePath(project, instance string) string {
path, err := instanceAdminInstancePathTemplate.Render(map[string]string{ return "" +
"project": project, "projects/" +
"instance": instance, project +
}) "/instances/" +
if err != nil { instance +
panic(err) ""
}
return path
} }
// ListInstanceConfigs lists the supported instance configurations for a given project. // ListInstanceConfigs lists the supported instance configurations for a given project.
@ -321,31 +308,36 @@ func (c *InstanceAdminClient) GetInstance(ctx context.Context, req *instancepb.G
// returned [long-running operation][google.longrunning.Operation] // returned [long-running operation][google.longrunning.Operation]
// can be used to track the progress of preparing the new // can be used to track the progress of preparing the new
// instance. The instance name is assigned by the caller. If the // instance. The instance name is assigned by the caller. If the
// named instance already exists, `CreateInstance` returns // named instance already exists, CreateInstance returns
// `ALREADY_EXISTS`. // ALREADY_EXISTS.
// //
// Immediately upon completion of this request: // Immediately upon completion of this request:
// //
// * The instance is readable via the API, with all requested attributes // The instance is readable via the API, with all requested attributes
// but no allocated resources. Its state is `CREATING`. // but no allocated resources. Its state is CREATING.
// //
// Until completion of the returned operation: // Until completion of the returned operation:
// //
// * Cancelling the operation renders the instance immediately unreadable // Cancelling the operation renders the instance immediately unreadable
// via the API. // via the API.
// * The instance can be deleted. //
// * All other attempts to modify the instance are rejected. // The instance can be deleted.
//
// All other attempts to modify the instance are rejected.
// //
// Upon completion of the returned operation: // Upon completion of the returned operation:
// //
// * Billing for all successfully-allocated resources begins (some types // Billing for all successfully-allocated resources begins (some types
// may have lower than the requested levels). // may have lower than the requested levels).
// * Databases can be created in the instance. //
// * The instance's allocated resource levels are readable via the API. // Databases can be created in the instance.
// * The instance's state becomes `READY`. //
// The instance's allocated resource levels are readable via the API.
//
// The instance's state becomes READY.
// //
// The returned [long-running operation][google.longrunning.Operation] will // The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format `<instance_name>/operations/<operation_id>` and // have a name of the format <instance_name>/operations/<operation_id> and
// can be used to track creation of the instance. The // can be used to track creation of the instance. The
// [metadata][google.longrunning.Operation.metadata] field type is // [metadata][google.longrunning.Operation.metadata] field type is
// [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
@ -372,41 +364,45 @@ func (c *InstanceAdminClient) CreateInstance(ctx context.Context, req *instancep
// as requested. The returned [long-running // as requested. The returned [long-running
// operation][google.longrunning.Operation] can be used to track the // operation][google.longrunning.Operation] can be used to track the
// progress of updating the instance. If the named instance does not // progress of updating the instance. If the named instance does not
// exist, returns `NOT_FOUND`. // exist, returns NOT_FOUND.
// //
// Immediately upon completion of this request: // Immediately upon completion of this request:
// //
// * For resource types for which a decrease in the instance's allocation // For resource types for which a decrease in the instance's allocation
// has been requested, billing is based on the newly-requested level. // has been requested, billing is based on the newly-requested level.
// //
// Until completion of the returned operation: // Until completion of the returned operation:
// //
// * Cancelling the operation sets its metadata's // Cancelling the operation sets its metadata's
// [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins
// restoring resources to their pre-request values. The operation // restoring resources to their pre-request values. The operation
// is guaranteed to succeed at undoing all resource changes, // is guaranteed to succeed at undoing all resource changes,
// after which point it terminates with a `CANCELLED` status. // after which point it terminates with a CANCELLED status.
// * All other attempts to modify the instance are rejected. //
// * Reading the instance via the API continues to give the pre-request // All other attempts to modify the instance are rejected.
// resource levels. //
// Reading the instance via the API continues to give the pre-request
// resource levels.
// //
// Upon completion of the returned operation: // Upon completion of the returned operation:
// //
// * Billing begins for all successfully-allocated resources (some types // Billing begins for all successfully-allocated resources (some types
// may have lower than the requested levels). // may have lower than the requested levels).
// * All newly-reserved resources are available for serving the instance's //
// tables. // All newly-reserved resources are available for serving the instance's
// * The instance's new resource levels are readable via the API. // tables.
//
// The instance's new resource levels are readable via the API.
// //
// The returned [long-running operation][google.longrunning.Operation] will // The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format `<instance_name>/operations/<operation_id>` and // have a name of the format <instance_name>/operations/<operation_id> and
// can be used to track the instance modification. The // can be used to track the instance modification. The
// [metadata][google.longrunning.Operation.metadata] field type is // [metadata][google.longrunning.Operation.metadata] field type is
// [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
// The [response][google.longrunning.Operation.response] field type is // The [response][google.longrunning.Operation.response] field type is
// [Instance][google.spanner.admin.instance.v1.Instance], if successful. // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
// //
// Authorization requires `spanner.instances.update` permission on // Authorization requires spanner.instances.update permission on
// resource [name][google.spanner.admin.instance.v1.Instance.name]. // resource [name][google.spanner.admin.instance.v1.Instance.name].
func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) { func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
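Read from the caller's side, the behaviour documented above reduces to: start the update, then block on the returned long-running operation. A minimal sketch, assuming a NewInstanceAdminClient constructor and a Close method on the generated client (neither is shown in this excerpt) and reusing the surrounding file's imports; the UpdateInstanceRequest fields are left as placeholders because they are not part of this hunk:

func exampleUpdateInstance(ctx context.Context) (*instancepb.Instance, error) {
	// NewInstanceAdminClient and Close are assumed from this generated package.
	c, err := NewInstanceAdminClient(ctx)
	if err != nil {
		return nil, err
	}
	defer c.Close()

	// The fields describing the desired resource change are omitted here.
	op, err := c.UpdateInstance(ctx, &instancepb.UpdateInstanceRequest{})
	if err != nil {
		return nil, err
	}
	// Wait polls until the instance reaches its new resource levels; a
	// cancelled or missing instance surfaces as a gRPC status error.
	return op.Wait(ctx)
}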
@@ -429,13 +425,13 @@ func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancep
// //
// Immediately upon completion of the request: // Immediately upon completion of the request:
// //
// * Billing ceases for all of the instance's reserved resources. // Billing ceases for all of the instance's reserved resources.
// //
// Soon afterward: // Soon afterward:
// //
// * The instance and *all of its databases* immediately and // The instance and all of its databases immediately and
// irrevocably disappear from the API. All data in the databases // irrevocably disappear from the API. All data in the databases
// is permanently deleted. // is permanently deleted.
func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error { func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteInstance[0:len(c.CallOptions.DeleteInstance):len(c.CallOptions.DeleteInstance)], opts...) opts = append(c.CallOptions.DeleteInstance[0:len(c.CallOptions.DeleteInstance):len(c.CallOptions.DeleteInstance)], opts...)
@@ -450,7 +446,7 @@ func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancep
// SetIamPolicy sets the access control policy on an instance resource. Replaces any // SetIamPolicy sets the access control policy on an instance resource. Replaces any
// existing policy. // existing policy.
// //
// Authorization requires `spanner.instances.setIamPolicy` on // Authorization requires spanner.instances.setIamPolicy on
// [resource][google.iam.v1.SetIamPolicyRequest.resource]. // [resource][google.iam.v1.SetIamPolicyRequest.resource].
func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
@@ -470,7 +466,7 @@ func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIa
// GetIamPolicy gets the access control policy for an instance resource. Returns an empty // GetIamPolicy gets the access control policy for an instance resource. Returns an empty
// policy if an instance exists but does not have a policy set. // policy if an instance exists but does not have a policy set.
// //
// Authorization requires `spanner.instances.getIamPolicy` on // Authorization requires spanner.instances.getIamPolicy on
// [resource][google.iam.v1.GetIamPolicyRequest.resource]. // [resource][google.iam.v1.GetIamPolicyRequest.resource].
func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
ctx = insertXGoog(ctx, c.xGoogHeader) ctx = insertXGoog(ctx, c.xGoogHeader)
@@ -490,7 +486,7 @@ func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIa
// TestIamPermissions returns permissions that the caller has on the specified instance resource. // TestIamPermissions returns permissions that the caller has on the specified instance resource.
// //
// Attempting this RPC on a non-existent Cloud Spanner instance resource will // Attempting this RPC on a non-existent Cloud Spanner instance resource will
// result in a NOT_FOUND error if the user has `spanner.instances.list` // result in a NOT_FOUND error if the user has spanner.instances.list
// permission on the containing Google Cloud Project. Otherwise returns an // permission on the containing Google Cloud Project. Otherwise returns an
// empty set of permissions. // empty set of permissions.
func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
@@ -610,7 +606,7 @@ func (c *InstanceAdminClient) CreateInstanceOperation(name string) *CreateInstan
// See documentation of Poll for error-handling information. // See documentation of Poll for error-handling information.
func (op *CreateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { func (op *CreateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
var resp instancepb.Instance var resp instancepb.Instance
if err := op.lro.Wait(ctx, &resp, opts...); err != nil { if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
return nil, err return nil, err
} }
return &resp, nil return &resp, nil
@@ -679,7 +675,7 @@ func (c *InstanceAdminClient) UpdateInstanceOperation(name string) *UpdateInstan
// See documentation of Poll for error-handling information. // See documentation of Poll for error-handling information.
func (op *UpdateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { func (op *UpdateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
var resp instancepb.Instance var resp instancepb.Instance
if err := op.lro.Wait(ctx, &resp, opts...); err != nil { if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
return nil, err return nil, err
} }
return &resp, nil return &resp, nil

vendor/cloud.google.com/go/spanner/apiv1/doc.go generated vendored Normal file

@@ -0,0 +1,44 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package spanner is an experimental, auto-generated package for the
// Cloud Spanner API.
//
// Cloud Spanner is a managed, mission-critical, globally consistent and
// scalable relational database service.
//
// Use the client at cloud.google.com/go/spanner in preference to this.
package spanner // import "cloud.google.com/go/spanner/apiv1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
func insertXGoog(ctx context.Context, val []string) context.Context {
md, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy()
md["x-goog-api-client"] = val
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/spanner.data",
}
}
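The scopes returned here are exactly what defaultClientOptions in the client file below feeds into option.WithScopes; callers can pass them explicitly when overriding dial options. A small sketch, assuming the google.golang.org/api/option import used elsewhere in this package:

func exampleScopes(ctx context.Context) (*Client, error) {
	// Pass the package's default scopes explicitly alongside other options.
	return NewClient(ctx, option.WithScopes(DefaultAuthScopes()...))
}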

vendor/cloud.google.com/go/spanner/apiv1/mock_test.go generated vendored Normal file

@@ -0,0 +1,853 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package spanner
import (
emptypb "github.com/golang/protobuf/ptypes/empty"
spannerpb "google.golang.org/genproto/googleapis/spanner/v1"
)
import (
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"golang.org/x/net/context"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
type mockSpannerServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
spannerpb.SpannerServer
reqs []proto.Message
// If set, all calls return this error.
err error
// responses to return if err == nil
resps []proto.Message
}
func (s *mockSpannerServer) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest) (*spannerpb.Session, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*spannerpb.Session), nil
}
func (s *mockSpannerServer) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest) (*spannerpb.Session, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*spannerpb.Session), nil
}
func (s *mockSpannerServer) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest) (*emptypb.Empty, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*emptypb.Empty), nil
}
func (s *mockSpannerServer) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest) (*spannerpb.ResultSet, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*spannerpb.ResultSet), nil
}
func (s *mockSpannerServer) ExecuteStreamingSql(req *spannerpb.ExecuteSqlRequest, stream spannerpb.Spanner_ExecuteStreamingSqlServer) error {
md, _ := metadata.FromIncomingContext(stream.Context())
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return s.err
}
for _, v := range s.resps {
if err := stream.Send(v.(*spannerpb.PartialResultSet)); err != nil {
return err
}
}
return nil
}
func (s *mockSpannerServer) Read(ctx context.Context, req *spannerpb.ReadRequest) (*spannerpb.ResultSet, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*spannerpb.ResultSet), nil
}
func (s *mockSpannerServer) StreamingRead(req *spannerpb.ReadRequest, stream spannerpb.Spanner_StreamingReadServer) error {
md, _ := metadata.FromIncomingContext(stream.Context())
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return s.err
}
for _, v := range s.resps {
if err := stream.Send(v.(*spannerpb.PartialResultSet)); err != nil {
return err
}
}
return nil
}
func (s *mockSpannerServer) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest) (*spannerpb.Transaction, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*spannerpb.Transaction), nil
}
func (s *mockSpannerServer) Commit(ctx context.Context, req *spannerpb.CommitRequest) (*spannerpb.CommitResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*spannerpb.CommitResponse), nil
}
func (s *mockSpannerServer) Rollback(ctx context.Context, req *spannerpb.RollbackRequest) (*emptypb.Empty, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*emptypb.Empty), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption
var (
mockSpanner mockSpannerServer
)
func TestMain(m *testing.M) {
flag.Parse()
serv := grpc.NewServer()
spannerpb.RegisterSpannerServer(serv, &mockSpanner)
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)
os.Exit(m.Run())
}
func TestSpannerCreateSession(t *testing.T) {
var name string = "name3373707"
var expectedResponse = &spannerpb.Session{
Name: name,
}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedDatabase string = DatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]")
var request = &spannerpb.CreateSessionRequest{
Database: formattedDatabase,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.CreateSession(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestSpannerCreateSessionError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedDatabase string = DatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]")
var request = &spannerpb.CreateSessionRequest{
Database: formattedDatabase,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.CreateSession(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSpannerGetSession(t *testing.T) {
var name2 string = "name2-1052831874"
var expectedResponse = &spannerpb.Session{
Name: name2,
}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedName string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var request = &spannerpb.GetSessionRequest{
Name: formattedName,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.GetSession(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestSpannerGetSessionError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedName string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var request = &spannerpb.GetSessionRequest{
Name: formattedName,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.GetSession(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSpannerDeleteSession(t *testing.T) {
var expectedResponse *emptypb.Empty = &emptypb.Empty{}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedName string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var request = &spannerpb.DeleteSessionRequest{
Name: formattedName,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.DeleteSession(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
}
func TestSpannerDeleteSessionError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedName string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var request = &spannerpb.DeleteSessionRequest{
Name: formattedName,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.DeleteSession(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
}
func TestSpannerExecuteSql(t *testing.T) {
var expectedResponse *spannerpb.ResultSet = &spannerpb.ResultSet{}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var sql string = "sql114126"
var request = &spannerpb.ExecuteSqlRequest{
Session: formattedSession,
Sql: sql,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ExecuteSql(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestSpannerExecuteSqlError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var sql string = "sql114126"
var request = &spannerpb.ExecuteSqlRequest{
Session: formattedSession,
Sql: sql,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ExecuteSql(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSpannerExecuteStreamingSql(t *testing.T) {
var chunkedValue bool = true
var resumeToken []byte = []byte("103")
var expectedResponse = &spannerpb.PartialResultSet{
ChunkedValue: chunkedValue,
ResumeToken: resumeToken,
}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var sql string = "sql114126"
var request = &spannerpb.ExecuteSqlRequest{
Session: formattedSession,
Sql: sql,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
stream, err := c.ExecuteStreamingSql(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestSpannerExecuteStreamingSqlError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var sql string = "sql114126"
var request = &spannerpb.ExecuteSqlRequest{
Session: formattedSession,
Sql: sql,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
stream, err := c.ExecuteStreamingSql(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSpannerRead(t *testing.T) {
var expectedResponse *spannerpb.ResultSet = &spannerpb.ResultSet{}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var table string = "table110115790"
var columns []string = nil
var keySet *spannerpb.KeySet = &spannerpb.KeySet{}
var request = &spannerpb.ReadRequest{
Session: formattedSession,
Table: table,
Columns: columns,
KeySet: keySet,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.Read(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestSpannerReadError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var table string = "table110115790"
var columns []string = nil
var keySet *spannerpb.KeySet = &spannerpb.KeySet{}
var request = &spannerpb.ReadRequest{
Session: formattedSession,
Table: table,
Columns: columns,
KeySet: keySet,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.Read(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSpannerStreamingRead(t *testing.T) {
var chunkedValue bool = true
var resumeToken []byte = []byte("103")
var expectedResponse = &spannerpb.PartialResultSet{
ChunkedValue: chunkedValue,
ResumeToken: resumeToken,
}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var table string = "table110115790"
var columns []string = nil
var keySet *spannerpb.KeySet = &spannerpb.KeySet{}
var request = &spannerpb.ReadRequest{
Session: formattedSession,
Table: table,
Columns: columns,
KeySet: keySet,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
stream, err := c.StreamingRead(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestSpannerStreamingReadError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var table string = "table110115790"
var columns []string = nil
var keySet *spannerpb.KeySet = &spannerpb.KeySet{}
var request = &spannerpb.ReadRequest{
Session: formattedSession,
Table: table,
Columns: columns,
KeySet: keySet,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
stream, err := c.StreamingRead(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := stream.Recv()
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSpannerBeginTransaction(t *testing.T) {
var id []byte = []byte("27")
var expectedResponse = &spannerpb.Transaction{
Id: id,
}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var options *spannerpb.TransactionOptions = &spannerpb.TransactionOptions{}
var request = &spannerpb.BeginTransactionRequest{
Session: formattedSession,
Options: options,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BeginTransaction(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestSpannerBeginTransactionError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var options *spannerpb.TransactionOptions = &spannerpb.TransactionOptions{}
var request = &spannerpb.BeginTransactionRequest{
Session: formattedSession,
Options: options,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.BeginTransaction(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSpannerCommit(t *testing.T) {
var expectedResponse *spannerpb.CommitResponse = &spannerpb.CommitResponse{}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var mutations []*spannerpb.Mutation = nil
var request = &spannerpb.CommitRequest{
Session: formattedSession,
Mutations: mutations,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.Commit(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestSpannerCommitError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var mutations []*spannerpb.Mutation = nil
var request = &spannerpb.CommitRequest{
Session: formattedSession,
Mutations: mutations,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.Commit(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestSpannerRollback(t *testing.T) {
var expectedResponse *emptypb.Empty = &emptypb.Empty{}
mockSpanner.err = nil
mockSpanner.reqs = nil
mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse)
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var transactionId []byte = []byte("28")
var request = &spannerpb.RollbackRequest{
Session: formattedSession,
TransactionId: transactionId,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.Rollback(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
}
func TestSpannerRollbackError(t *testing.T) {
errCode := codes.PermissionDenied
mockSpanner.err = gstatus.Error(errCode, "test error")
var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]")
var transactionId []byte = []byte("28")
var request = &spannerpb.RollbackRequest{
Session: formattedSession,
TransactionId: transactionId,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
err = c.Rollback(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
}


@@ -0,0 +1,392 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package spanner
import (
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
spannerpb "google.golang.org/genproto/googleapis/spanner/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
CreateSession []gax.CallOption
GetSession []gax.CallOption
DeleteSession []gax.CallOption
ExecuteSql []gax.CallOption
ExecuteStreamingSql []gax.CallOption
Read []gax.CallOption
StreamingRead []gax.CallOption
BeginTransaction []gax.CallOption
Commit []gax.CallOption
Rollback []gax.CallOption
}
func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("spanner.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 32000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"long_running", "long_running"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 32000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
CreateSession: retry[[2]string{"default", "idempotent"}],
GetSession: retry[[2]string{"default", "idempotent"}],
DeleteSession: retry[[2]string{"default", "idempotent"}],
ExecuteSql: retry[[2]string{"default", "idempotent"}],
ExecuteStreamingSql: retry[[2]string{"default", "non_idempotent"}],
Read: retry[[2]string{"default", "idempotent"}],
StreamingRead: retry[[2]string{"default", "non_idempotent"}],
BeginTransaction: retry[[2]string{"default", "idempotent"}],
Commit: retry[[2]string{"long_running", "long_running"}],
Rollback: retry[[2]string{"default", "idempotent"}],
}
}
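These defaults can be overridden per call, since every method below accepts trailing gax.CallOption values. A hedged sketch of a one-off retry policy built from the same gax primitives used above:

func exampleRetryOverride(ctx context.Context, c *Client, req *spannerpb.GetSessionRequest) (*spannerpb.Session, error) {
	// Retry only on Unavailable, with a tighter backoff than the default.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    250 * time.Millisecond,
			Max:        5000 * time.Millisecond,
			Multiplier: 2.0,
		})
	})
	return c.GetSession(ctx, req, retry)
}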
// Client is a client for interacting with Cloud Spanner API.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
client spannerpb.SpannerClient
// The call options for this service.
CallOptions *CallOptions
// The metadata to be sent with each request.
xGoogHeader []string
}
// NewClient creates a new spanner client.
//
// Cloud Spanner API
//
// The Cloud Spanner API can be used to manage sessions and execute
// transactions on data stored in Cloud Spanner databases.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),
client: spannerpb.NewSpannerClient(conn),
}
c.SetGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}
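Construction and teardown follow the usual pattern for these generated clients; a minimal sketch reusing only the identifiers shown above:

func exampleNewClient(ctx context.Context) error {
	c, err := NewClient(ctx)
	if err != nil {
		return err
	}
	// Close releases the underlying gRPC connection when the client is done.
	defer c.Close()
	_ = c.Connection() // the raw *grpc.ClientConn, if lower-level access is needed
	return nil
}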
// SetGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
}
// DatabasePath returns the path for the database resource.
func DatabasePath(project, instance, database string) string {
return "" +
"projects/" +
project +
"/instances/" +
instance +
"/databases/" +
database +
""
}
// SessionPath returns the path for the session resource.
func SessionPath(project, instance, database, session string) string {
return "" +
"projects/" +
project +
"/instances/" +
instance +
"/databases/" +
database +
"/sessions/" +
session +
""
}
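Both helpers are plain string concatenation, so the resulting resource names are easy to predict:

func examplePaths() {
	db := DatabasePath("my-project", "my-instance", "my-db")
	// db == "projects/my-project/instances/my-instance/databases/my-db"
	sess := SessionPath("my-project", "my-instance", "my-db", "my-session")
	// sess == db + "/sessions/my-session"
	_, _ = db, sess
}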
// CreateSession creates a new session. A session can be used to perform
// transactions that read and/or modify data in a Cloud Spanner database.
// Sessions are meant to be reused for many consecutive
// transactions.
//
// Sessions can only execute one transaction at a time. To execute
// multiple concurrent read-write/write-only transactions, create
// multiple sessions. Note that standalone reads and queries use a
// transaction internally, and count toward the one transaction
// limit.
//
// Cloud Spanner limits the number of sessions that can exist at any given
// time; thus, it is a good idea to delete idle and/or unneeded sessions.
// Aside from explicit deletes, Cloud Spanner can delete sessions for which no
// operations are sent for more than an hour. If a session is deleted,
// requests to it return NOT_FOUND.
//
// Idle sessions can be kept alive by sending a trivial SQL query
// periodically, e.g., "SELECT 1".
func (c *Client) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.CreateSession[0:len(c.CallOptions.CreateSession):len(c.CallOptions.CreateSession)], opts...)
var resp *spannerpb.Session
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CreateSession(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
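Mirroring the request shape used in mock_test.go above, a minimal sketch of creating a session for a database:

func exampleCreateSession(ctx context.Context, c *Client) (*spannerpb.Session, error) {
	db := DatabasePath("my-project", "my-instance", "my-db")
	// Sessions are limited in number and reaped when idle; reuse them.
	return c.CreateSession(ctx, &spannerpb.CreateSessionRequest{Database: db})
}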
// GetSession gets a session. Returns NOT_FOUND if the session does not exist.
// This is mainly useful for determining whether a session is still
// alive.
func (c *Client) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.GetSession[0:len(c.CallOptions.GetSession):len(c.CallOptions.GetSession)], opts...)
var resp *spannerpb.Session
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.GetSession(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteSession ends a session, releasing server resources associated with it.
func (c *Client) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteSession[0:len(c.CallOptions.DeleteSession):len(c.CallOptions.DeleteSession)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.DeleteSession(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// ExecuteSql executes an SQL query, returning all rows in a single reply. This
// method cannot be used to return a result set larger than 10 MiB;
// if the query yields more data than that, the query fails with
// a FAILED_PRECONDITION error.
//
// Queries inside read-write transactions might return ABORTED. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
//
// Larger result sets can be fetched in streaming fashion by calling
// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead.
func (c *Client) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ExecuteSql[0:len(c.CallOptions.ExecuteSql):len(c.CallOptions.ExecuteSql)], opts...)
var resp *spannerpb.ResultSet
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ExecuteSql(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
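A sketch of a single-shot query; the Rows accessor on ResultSet is assumed from the spannerpb protos rather than shown in this excerpt:

func exampleExecuteSql(ctx context.Context, c *Client, session string) error {
	resp, err := c.ExecuteSql(ctx, &spannerpb.ExecuteSqlRequest{
		Session: session,
		Sql:     "SELECT 1", // the trivial keep-alive query mentioned in CreateSession's doc
	})
	if err != nil {
		return err // e.g. FAILED_PRECONDITION for results over 10 MiB, or ABORTED
	}
	_ = resp.GetRows() // Rows getter assumed from the generated ResultSet type
	return nil
}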
// ExecuteStreamingSql like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result
// set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there
// is no limit on the size of the returned result set. However, no
// individual row in the result set can exceed 100 MiB, and no
// column value can exceed 10 MiB.
func (c *Client) ExecuteStreamingSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (spannerpb.Spanner_ExecuteStreamingSqlClient, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ExecuteStreamingSql[0:len(c.CallOptions.ExecuteStreamingSql):len(c.CallOptions.ExecuteStreamingSql)], opts...)
var resp spannerpb.Spanner_ExecuteStreamingSqlClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ExecuteStreamingSql(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
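For larger result sets the streaming variant is drained with Recv until io.EOF, the usual gRPC streaming convention; a sketch (io comes from the standard library):

func exampleExecuteStreamingSql(ctx context.Context, c *Client, session, sql string) error {
	stream, err := c.ExecuteStreamingSql(ctx, &spannerpb.ExecuteSqlRequest{
		Session: session,
		Sql:     sql,
	})
	if err != nil {
		return err
	}
	for {
		partial, err := stream.Recv()
		if err == io.EOF {
			return nil // stream exhausted
		}
		if err != nil {
			return err
		}
		_ = partial // each PartialResultSet carries a chunk of the result
	}
}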
// Read reads rows from the database using key lookups and scans, as a
// simple key/value style alternative to
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to
// return a result set larger than 10 MiB; if the read matches more
// data than that, the read fails with a FAILED_PRECONDITION
// error.
//
// Reads inside read-write transactions might return ABORTED. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
//
// Larger result sets can be yielded in streaming fashion by calling
// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
func (c *Client) Read(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.Read[0:len(c.CallOptions.Read):len(c.CallOptions.Read)], opts...)
var resp *spannerpb.ResultSet
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.Read(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
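The key/value style read mirrors the request used in the tests above; KeySet is left empty here because its fields are outside this excerpt:

func exampleRead(ctx context.Context, c *Client, session string) (*spannerpb.ResultSet, error) {
	return c.Read(ctx, &spannerpb.ReadRequest{
		Session: session,
		Table:   "my_table",
		Columns: []string{"id", "name"},
		KeySet:  &spannerpb.KeySet{}, // which keys/ranges to read; fields not shown here
	})
}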
// StreamingRead like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a
// stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the
// size of the returned result set. However, no individual row in
// the result set can exceed 100 MiB, and no column value can exceed
// 10 MiB.
func (c *Client) StreamingRead(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (spannerpb.Spanner_StreamingReadClient, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.StreamingRead[0:len(c.CallOptions.StreamingRead):len(c.CallOptions.StreamingRead)], opts...)
var resp spannerpb.Spanner_StreamingReadClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.StreamingRead(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// BeginTransaction begins a new transaction. This step can often be skipped:
// [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
// side-effect.
func (c *Client) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest, opts ...gax.CallOption) (*spannerpb.Transaction, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...)
var resp *spannerpb.Transaction
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.BeginTransaction(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// Commit commits a transaction. The request includes the mutations to be
// applied to rows in the database.
//
// Commit might return an ABORTED error. This can occur at any time;
// commonly, the cause is conflicts with concurrent
// transactions. However, it can also happen for a variety of other
// reasons. If Commit returns ABORTED, the caller should re-attempt
// the transaction from the beginning, re-using the same session.
func (c *Client) Commit(ctx context.Context, req *spannerpb.CommitRequest, opts ...gax.CallOption) (*spannerpb.CommitResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...)
var resp *spannerpb.CommitResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.Commit(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
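The ABORTED contract above suggests a simple retry loop that re-attempts the whole transaction on the same session. A hedged sketch using the gstatus and codes aliases as imported in mock_test.go above; buildMutations is a hypothetical caller-supplied function, and request fields beyond those shown in the tests are omitted:

func exampleCommitWithRetry(ctx context.Context, c *Client, session string, buildMutations func() []*spannerpb.Mutation) (*spannerpb.CommitResponse, error) {
	for {
		resp, err := c.Commit(ctx, &spannerpb.CommitRequest{
			Session:   session,
			Mutations: buildMutations(), // re-build the work on every attempt
		})
		if err == nil {
			return resp, nil
		}
		if st, ok := gstatus.FromError(err); ok && st.Code() == codes.Aborted {
			continue // re-attempt the transaction from the beginning, same session
		}
		return nil, err
	}
}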
// Rollback rolls back a transaction, releasing any locks it holds. It is a good
// idea to call this for any transaction that includes one or more
// [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and
// ultimately decides not to commit.
//
// Rollback returns OK if it successfully aborts the transaction, the
// transaction was already aborted, or the transaction is not
// found. Rollback never returns ABORTED.
func (c *Client) Rollback(ctx context.Context, req *spannerpb.RollbackRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.client.Rollback(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
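BeginTransaction and Rollback pair naturally: the Transaction's Id (as in the tests above) feeds RollbackRequest.TransactionId. A minimal sketch; TransactionOptions is left empty because its mode fields are outside this excerpt:

func exampleBeginAndRollback(ctx context.Context, c *Client, session string) error {
	tx, err := c.BeginTransaction(ctx, &spannerpb.BeginTransactionRequest{
		Session: session,
		Options: &spannerpb.TransactionOptions{}, // transaction mode omitted here
	})
	if err != nil {
		return err
	}
	// Deciding not to commit: release the transaction's locks.
	return c.Rollback(ctx, &spannerpb.RollbackRequest{
		Session:       session,
		TransactionId: tx.Id,
	})
}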

Some files were not shown because too many files have changed in this diff.