vendor: update gcs driver dependencies files
Signed-off-by: Flavian Missi <fmissi@redhat.com>
parent 695102895b
commit 817dd286c1

460 changed files with 107861 additions and 10376 deletions
2 vendor/google.golang.org/cloud/LICENSE → vendor/cloud.google.com/go/compute/LICENSE (generated, vendored)

@@ -187,7 +187,7 @@
 same "printed page" as the copyright notice for easier
 identification within third-party archives.
 
-Copyright 2014 Google Inc.
+Copyright [yyyy] [name of copyright owner]
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
18 vendor/cloud.google.com/go/compute/internal/version.go (generated, vendored, new file)

@@ -0,0 +1,18 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

// Version is the current tagged release of the library.
const Version = "1.18.0"
19 vendor/cloud.google.com/go/compute/metadata/CHANGES.md (generated, vendored, new file)

@@ -0,0 +1,19 @@
# Changes

## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)


### Bug Fixes

* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165)

## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01)


### Bug Fixes

* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430)

## [0.1.0] (2022-10-26)

Initial release of metadata being it's own module.
202 vendor/cloud.google.com/go/compute/metadata/LICENSE (generated, vendored, new file)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
27 vendor/cloud.google.com/go/compute/metadata/README.md (generated, vendored, new file)

@@ -0,0 +1,27 @@
# Compute API

[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata)

This is a utility library for communicating with Google Cloud metadata service
on Google Cloud.

## Install

```bash
go get cloud.google.com/go/compute/metadata
```

## Go Version Support

See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
section in the root directory's README.

## Contributing

Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
document for details.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms. See
[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.
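As a quick orientation to the package vendored above, here is a minimal usage sketch. It is not part of the diff; it only assumes a process that may or may not be running on GCE, and it exercises the package-level helpers that the updated client code below backs.

```go
// Sketch only (not part of the vendored files): probing the GCE metadata
// service with the package updated in this commit.
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE reports whether the process can reach the metadata server.
	if !metadata.OnGCE() {
		log.Println("not on GCE; metadata server unavailable")
		return
	}
	// ProjectID queries the metadata server; with this update the underlying
	// HTTP calls are retried with backoff (see retry.go further down).
	projectID, err := metadata.ProjectID()
	if err != nil {
		log.Fatalf("metadata.ProjectID: %v", err)
	}
	fmt.Println("running in project", projectID)
}
```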
52 vendor/cloud.google.com/go/compute/metadata/metadata.go (generated, vendored)

@@ -16,7 +16,7 @@
 // metadata and API service accounts.
 //
 // This package is a wrapper around the GCE metadata service,
-// as documented at https://developers.google.com/compute/docs/metadata.
+// as documented at https://cloud.google.com/compute/docs/metadata/overview.
 package metadata // import "cloud.google.com/go/compute/metadata"
 
 import (
@@ -61,14 +61,20 @@ var (
 	instID  = &cachedValue{k: "instance/id", trim: true}
 )
 
-var defaultClient = &Client{hc: &http.Client{
-	Transport: &http.Transport{
-		Dial: (&net.Dialer{
-			Timeout:   2 * time.Second,
-			KeepAlive: 30 * time.Second,
-		}).Dial,
-	},
-}}
+var defaultClient = &Client{hc: newDefaultHTTPClient()}
+
+func newDefaultHTTPClient() *http.Client {
+	return &http.Client{
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+			IdleConnTimeout: 60 * time.Second,
+		},
+		Timeout: 5 * time.Second,
+	}
+}
 
 // NotDefinedError is returned when requested metadata is not defined.
 //
@@ -130,7 +136,7 @@ func testOnGCE() bool {
 	go func() {
 		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
 		req.Header.Set("User-Agent", userAgent)
-		res, err := defaultClient.hc.Do(req.WithContext(ctx))
+		res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
 		if err != nil {
 			resc <- false
 			return
@@ -140,7 +146,8 @@ func testOnGCE() bool {
 	}()
 
 	go func() {
-		addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
+		resolver := &net.Resolver{}
+		addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
 		if err != nil || len(addrs) == 0 {
 			resc <- false
 			return
@@ -282,6 +289,7 @@ func NewClient(c *http.Client) *Client {
 // getETag returns a value from the metadata service as well as the associated ETag.
 // This func is otherwise equivalent to Get.
 func (c *Client) getETag(suffix string) (value, etag string, err error) {
+	ctx := context.TODO()
 	// Using a fixed IP makes it very difficult to spoof the metadata service in
 	// a container, which is an important use-case for local testing of cloud
 	// deployments. To enable spoofing of the metadata service, the environment
@@ -304,9 +312,25 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
 	}
 	req.Header.Set("Metadata-Flavor", "Google")
 	req.Header.Set("User-Agent", userAgent)
-	res, err := c.hc.Do(req)
-	if err != nil {
-		return "", "", err
+	var res *http.Response
+	var reqErr error
+	retryer := newRetryer()
+	for {
+		res, reqErr = c.hc.Do(req)
+		var code int
+		if res != nil {
+			code = res.StatusCode
+		}
+		if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
+			if err := sleep(ctx, delay); err != nil {
+				return "", "", err
+			}
+			continue
+		}
+		break
+	}
+	if reqErr != nil {
+		return "", "", reqErr
 	}
 	defer res.Body.Close()
 	if res.StatusCode == http.StatusNotFound {
114 vendor/cloud.google.com/go/compute/metadata/retry.go (generated, vendored, new file)

@@ -0,0 +1,114 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metadata

import (
	"context"
	"io"
	"math/rand"
	"net/http"
	"time"
)

const (
	maxRetryAttempts = 5
)

var (
	syscallRetryable = func(err error) bool { return false }
)

// defaultBackoff is basically equivalent to gax.Backoff without the need for
// the dependency.
type defaultBackoff struct {
	max time.Duration
	mul float64
	cur time.Duration
}

func (b *defaultBackoff) Pause() time.Duration {
	d := time.Duration(1 + rand.Int63n(int64(b.cur)))
	b.cur = time.Duration(float64(b.cur) * b.mul)
	if b.cur > b.max {
		b.cur = b.max
	}
	return d
}

// sleep is the equivalent of gax.Sleep without the need for the dependency.
func sleep(ctx context.Context, d time.Duration) error {
	t := time.NewTimer(d)
	select {
	case <-ctx.Done():
		t.Stop()
		return ctx.Err()
	case <-t.C:
		return nil
	}
}

func newRetryer() *metadataRetryer {
	return &metadataRetryer{bo: &defaultBackoff{
		cur: 100 * time.Millisecond,
		max: 30 * time.Second,
		mul: 2,
	}}
}

type backoff interface {
	Pause() time.Duration
}

type metadataRetryer struct {
	bo       backoff
	attempts int
}

func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) {
	if status == http.StatusOK {
		return 0, false
	}
	retryOk := shouldRetry(status, err)
	if !retryOk {
		return 0, false
	}
	if r.attempts == maxRetryAttempts {
		return 0, false
	}
	r.attempts++
	return r.bo.Pause(), true
}

func shouldRetry(status int, err error) bool {
	if 500 <= status && status <= 599 {
		return true
	}
	if err == io.ErrUnexpectedEOF {
		return true
	}
	// Transient network errors should be retried.
	if syscallRetryable(err) {
		return true
	}
	if err, ok := err.(interface{ Temporary() bool }); ok {
		if err.Temporary() {
			return true
		}
	}
	if err, ok := err.(interface{ Unwrap() error }); ok {
		return shouldRetry(status, err.Unwrap())
	}
	return false
}
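retry.go gives getETag capped exponential backoff with jitter: each pause is a random duration up to the current cap, the cap doubles until it reaches 30s, and at most 5 attempts are made on 5xx responses and transient network errors. The following is a standalone sketch of the same pattern; the names and the placeholder URL are illustrative, since the vendored identifiers above are unexported.

```go
// Sketch only: capped exponential backoff with jitter around an HTTP call,
// mirroring the loop that metadata.go's getETag now runs via newRetryer().
package main

import (
	"fmt"
	"math/rand"
	"net/http"
	"time"
)

// pause mirrors defaultBackoff.Pause: random jitter up to the current cap,
// then the cap grows geometrically until it hits max.
func pause(cur *time.Duration, max time.Duration, mul float64) time.Duration {
	d := time.Duration(1 + rand.Int63n(int64(*cur)))
	*cur = time.Duration(float64(*cur) * mul)
	if *cur > max {
		*cur = max
	}
	return d
}

func main() {
	cur := 100 * time.Millisecond
	const attempts = 5
	for i := 0; i < attempts; i++ {
		// Placeholder endpoint; the real code targets the metadata server.
		res, err := http.Get("https://example.com/health")
		if err == nil {
			res.Body.Close()
			if res.StatusCode < 500 {
				fmt.Println("done:", res.Status)
				return
			}
		}
		// Retry only transient failures: network errors and 5xx responses.
		time.Sleep(pause(&cur, 30*time.Second, 2))
	}
	fmt.Println("giving up after", attempts, "attempts")
}
```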
26 vendor/cloud.google.com/go/compute/metadata/retry_linux.go (generated, vendored, new file)

@@ -0,0 +1,26 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux
// +build linux

package metadata

import "syscall"

func init() {
	// Initialize syscallRetryable to return true on transient socket-level
	// errors. These errors are specific to Linux.
	syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
}
23 vendor/cloud.google.com/go/compute/metadata/tidyfix.go (generated, vendored, new file)

@@ -0,0 +1,23 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file, and the {{.RootMod}} import, won't actually become part of
// the resultant binary.
//go:build modhack
// +build modhack

package metadata

// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "cloud.google.com/go/compute/internal"
90 vendor/cloud.google.com/go/iam/CHANGES.md (generated, vendored, new file)

@@ -0,0 +1,90 @@
# Changes

## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17)


### Features

* **iam:** Migrate to new stubs ([a61ddcd](https://github.com/googleapis/google-cloud-go/commit/a61ddcd3041c7af4a15109dc4431f9b327c497fb))

## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.10.0...iam/v0.11.0) (2023-02-16)


### Features

* **iam:** Start generating proto stubs ([970d763](https://github.com/googleapis/google-cloud-go/commit/970d763531b54b2bc75d7ff26a20b6e05150cab8))

## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.9.0...iam/v0.10.0) (2023-01-04)


### Features

* **iam:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0))

## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.8.0...iam/v0.9.0) (2022-12-15)


### Features

* **iam:** Rewrite iam sigs and update proto import ([#7137](https://github.com/googleapis/google-cloud-go/issues/7137)) ([ad67fa3](https://github.com/googleapis/google-cloud-go/commit/ad67fa36c263c161226f7fecbab5221592374dca))

## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05)


### Features

* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38))

## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03)


### Features

* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad))

## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25)


### Features

* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc))

## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28)


### Features

* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7))

## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06)


### Features

* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50))

## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23)


### Features

* **iam:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))

## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14)


### Features

* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e))

### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14)


### Bug Fixes

* **iam:** run formatter ([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9))

## v0.1.0

This is the first tag to carve out iam as its own module. See
[Add a module to a multi-module repository](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository).
202 vendor/cloud.google.com/go/iam/LICENSE (generated, vendored, new file)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
40 vendor/cloud.google.com/go/iam/README.md (generated, vendored, new file)

@@ -0,0 +1,40 @@
# IAM API

[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/iam.svg)](https://pkg.go.dev/cloud.google.com/go/iam)

Go Client Library for IAM API.

## Install

```bash
go get cloud.google.com/go/iam
```

## Stability

The stability of this module is indicated by SemVer.

However, a `v1+` module may have breaking changes in two scenarios:

* Packages with `alpha` or `beta` in the import path
* The GoDoc has an explicit stability disclaimer (for example, for an experimental feature).

## Go Version Support

See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
section in the root directory's README.

## Authorization

See the [Authorization](https://github.com/googleapis/google-cloud-go#authorization)
section in the root directory's README.

## Contributing

Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
document for details.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms. See
[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.
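In this repository the iam module arrives as a transitive dependency of the Cloud Storage client behind the gcs driver. A sketch of how it typically surfaces, reading and extending a bucket's IAM policy; the bucket name and member below are placeholders, and the surrounding storage client is assumed rather than shown in this diff.

```go
// Sketch only: using the vendored iam package through a GCS bucket handle.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/iam"
	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatalf("storage.NewClient: %v", err)
	}
	defer client.Close()

	// BucketHandle.IAM returns an *iam.Handle backed by the bucket resource.
	handle := client.Bucket("example-bucket").IAM()
	policy, err := handle.Policy(ctx)
	if err != nil {
		log.Fatalf("Policy: %v", err)
	}
	// Grant read access and write the policy back.
	policy.Add("user:someone@example.com", iam.RoleName("roles/storage.objectViewer"))
	if err := handle.SetPolicy(ctx, policy); err != nil {
		log.Fatalf("SetPolicy: %v", err)
	}
}
```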
672 vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go (generated, vendored, new file)

@@ -0,0 +1,672 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.26.0
// 	protoc        v3.21.12
// source: google/iam/v1/iam_policy.proto

package iampb

import (
	context "context"
	reflect "reflect"
	sync "sync"

	_ "google.golang.org/genproto/googleapis/api/annotations"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// Request message for `SetIamPolicy` method.
type SetIamPolicyRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// REQUIRED: The resource for which the policy is being specified.
	// See the operation documentation for the appropriate value for this field.
	Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
	// REQUIRED: The complete policy to be applied to the `resource`. The size of
	// the policy is limited to a few 10s of KB. An empty policy is a
	// valid policy but certain Cloud Platform services (such as Projects)
	// might reject them.
	Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
	// OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
	// the fields in the mask will be modified. If no mask is provided, the
	// following default mask is used:
	//
	// `paths: "bindings, etag"`
	UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}

func (x *SetIamPolicyRequest) Reset() {
	*x = SetIamPolicyRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *SetIamPolicyRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SetIamPolicyRequest) ProtoMessage() {}

func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead.
func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) {
	return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0}
}

func (x *SetIamPolicyRequest) GetResource() string {
	if x != nil {
		return x.Resource
	}
	return ""
}

func (x *SetIamPolicyRequest) GetPolicy() *Policy {
	if x != nil {
		return x.Policy
	}
	return nil
}

func (x *SetIamPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
	if x != nil {
		return x.UpdateMask
	}
	return nil
}

// Request message for `GetIamPolicy` method.
type GetIamPolicyRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// REQUIRED: The resource for which the policy is being requested.
	// See the operation documentation for the appropriate value for this field.
	Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
	// OPTIONAL: A `GetPolicyOptions` object for specifying options to
	// `GetIamPolicy`.
	Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
}

func (x *GetIamPolicyRequest) Reset() {
	*x = GetIamPolicyRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *GetIamPolicyRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetIamPolicyRequest) ProtoMessage() {}

func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead.
func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) {
	return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1}
}

func (x *GetIamPolicyRequest) GetResource() string {
	if x != nil {
		return x.Resource
	}
	return ""
}

func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions {
	if x != nil {
		return x.Options
	}
	return nil
}

// Request message for `TestIamPermissions` method.
type TestIamPermissionsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// REQUIRED: The resource for which the policy detail is being requested.
	// See the operation documentation for the appropriate value for this field.
	Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
	// The set of permissions to check for the `resource`. Permissions with
	// wildcards (such as '*' or 'storage.*') are not allowed. For more
	// information see
	// [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
	Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"`
}

func (x *TestIamPermissionsRequest) Reset() {
	*x = TestIamPermissionsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TestIamPermissionsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TestIamPermissionsRequest) ProtoMessage() {}

func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead.
func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) {
	return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2}
}

func (x *TestIamPermissionsRequest) GetResource() string {
	if x != nil {
		return x.Resource
	}
	return ""
}

func (x *TestIamPermissionsRequest) GetPermissions() []string {
	if x != nil {
		return x.Permissions
	}
	return nil
}

// Response message for `TestIamPermissions` method.
type TestIamPermissionsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// A subset of `TestPermissionsRequest.permissions` that the caller is
	// allowed.
	Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"`
}

func (x *TestIamPermissionsResponse) Reset() {
	*x = TestIamPermissionsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TestIamPermissionsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TestIamPermissionsResponse) ProtoMessage() {}

func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead.
func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) {
	return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3}
}

func (x *TestIamPermissionsResponse) GetPermissions() []string {
	if x != nil {
		return x.Permissions
	}
	return nil
}

var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor

var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{
|
||||
0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a,
|
||||
0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
|
||||
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
|
||||
0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
|
||||
0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65,
|
||||
0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01,
|
||||
0x0a, 0x13, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a,
|
||||
0x01, 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06,
|
||||
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
|
||||
0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
|
||||
0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x77, 0x0a,
|
||||
0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01,
|
||||
0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f,
|
||||
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74,
|
||||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
|
||||
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61,
|
||||
0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a,
|
||||
0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65,
|
||||
0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42,
|
||||
0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
|
||||
0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
|
||||
0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
||||
0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
||||
0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93,
|
||||
0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
|
||||
0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
|
||||
0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50,
|
||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69,
|
||||
0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
|
||||
0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
|
||||
0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72,
|
||||
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49,
|
||||
0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12,
|
||||
0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f,
|
||||
0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73,
|
||||
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73,
|
||||
0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22,
|
||||
0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a,
|
||||
0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73,
|
||||
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d,
|
||||
0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e,
|
||||
0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
|
||||
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69,
|
||||
0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
|
||||
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56,
|
||||
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once
|
||||
file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte {
|
||||
file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() {
|
||||
file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData)
|
||||
})
|
||||
return file_google_iam_v1_iam_policy_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{
|
||||
(*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest
|
||||
(*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest
|
||||
(*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest
|
||||
(*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse
|
||||
(*Policy)(nil), // 4: google.iam.v1.Policy
|
||||
(*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask
|
||||
(*GetPolicyOptions)(nil), // 6: google.iam.v1.GetPolicyOptions
|
||||
}
|
||||
var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{
|
||||
4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy
|
||||
5, // 1: google.iam.v1.SetIamPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask
|
||||
6, // 2: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions
|
||||
0, // 3: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
|
||||
1, // 4: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
|
||||
2, // 5: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
|
||||
4, // 6: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy
|
||||
4, // 7: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy
|
||||
3, // 8: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
|
||||
6, // [6:9] is the sub-list for method output_type
|
||||
3, // [3:6] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
3, // [3:3] is the sub-list for extension extendee
|
||||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_iam_v1_iam_policy_proto_init() }
|
||||
func file_google_iam_v1_iam_policy_proto_init() {
|
||||
if File_google_iam_v1_iam_policy_proto != nil {
|
||||
return
|
||||
}
|
||||
file_google_iam_v1_options_proto_init()
|
||||
file_google_iam_v1_policy_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SetIamPolicyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetIamPolicyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*TestIamPermissionsRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*TestIamPermissionsResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_google_iam_v1_iam_policy_proto_goTypes,
|
||||
DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs,
|
||||
MessageInfos: file_google_iam_v1_iam_policy_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_iam_v1_iam_policy_proto = out.File
|
||||
file_google_iam_v1_iam_policy_proto_rawDesc = nil
|
||||
file_google_iam_v1_iam_policy_proto_goTypes = nil
|
||||
file_google_iam_v1_iam_policy_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// IAMPolicyClient is the client API for IAMPolicy service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type IAMPolicyClient interface {
|
||||
// Sets the access control policy on the specified resource. Replaces any
|
||||
// existing policy.
|
||||
//
|
||||
// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
|
||||
SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
|
||||
// Gets the access control policy for a resource.
|
||||
// Returns an empty policy if the resource exists and does not have a policy
|
||||
// set.
|
||||
GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
|
||||
// Returns permissions that a caller has on the specified resource.
|
||||
// If the resource does not exist, this will return an empty set of
|
||||
// permissions, not a `NOT_FOUND` error.
|
||||
//
|
||||
// Note: This operation is designed to be used for building permission-aware
|
||||
// UIs and command-line tools, not for authorization checking. This operation
|
||||
// may "fail open" without warning.
|
||||
TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error)
|
||||
}
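As a rough, hedged sketch of how this generated client surface is typically consumed (the dial target, insecure credentials, and resource name below are placeholders; a real Google API endpoint would need TLS and OAuth credentials):

```go
package main

import (
	"context"
	"log"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder target; a production client would use TLS and auth credentials.
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := iampb.NewIAMPolicyClient(conn)
	policy, err := client.GetIamPolicy(context.Background(), &iampb.GetIamPolicyRequest{
		Resource: "projects/my-project/topics/my-topic", // placeholder resource name
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("policy has %d bindings", len(policy.GetBindings()))
}
```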
|
||||
|
||||
type iAMPolicyClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient {
|
||||
return &iAMPolicyClient{cc}
|
||||
}
|
||||
|
||||
func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
|
||||
out := new(Policy)
|
||||
err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
|
||||
out := new(Policy)
|
||||
err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) {
|
||||
out := new(TestIamPermissionsResponse)
|
||||
err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// IAMPolicyServer is the server API for IAMPolicy service.
|
||||
type IAMPolicyServer interface {
|
||||
// Sets the access control policy on the specified resource. Replaces any
|
||||
// existing policy.
|
||||
//
|
||||
// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
|
||||
SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error)
|
||||
// Gets the access control policy for a resource.
|
||||
// Returns an empty policy if the resource exists and does not have a policy
|
||||
// set.
|
||||
GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error)
|
||||
// Returns permissions that a caller has on the specified resource.
|
||||
// If the resource does not exist, this will return an empty set of
|
||||
// permissions, not a `NOT_FOUND` error.
|
||||
//
|
||||
// Note: This operation is designed to be used for building permission-aware
|
||||
// UIs and command-line tools, not for authorization checking. This operation
|
||||
// may "fail open" without warning.
|
||||
TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error)
|
||||
}
|
||||
|
||||
// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedIAMPolicyServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
|
||||
}
|
||||
func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
|
||||
}
|
||||
func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
|
||||
}
|
||||
|
||||
func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) {
|
||||
s.RegisterService(&_IAMPolicy_serviceDesc, srv)
|
||||
}
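A minimal, hypothetical server wiring built on the generated pieces above: embed UnimplementedIAMPolicyServer for forward compatibility, override only the methods you need, and register with RegisterIAMPolicyServer. The policyServer type and its behavior are illustrative, not part of the library:

```go
package main

import (
	"context"
	"log"
	"net"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/grpc"
)

// policyServer is a hypothetical implementation; the embedded
// UnimplementedIAMPolicyServer supplies stubs for the methods it does not override.
type policyServer struct {
	iampb.UnimplementedIAMPolicyServer
}

func (s *policyServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
	// Echo back every requested permission; a real server would evaluate them.
	return &iampb.TestIamPermissionsResponse{Permissions: req.GetPermissions()}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	iampb.RegisterIAMPolicyServer(srv, &policyServer{})
	log.Fatal(srv.Serve(lis))
}
```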
|
||||
|
||||
func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SetIamPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(IAMPolicyServer).SetIamPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetIamPolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(IAMPolicyServer).GetIamPolicy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(TestIamPermissionsRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(IAMPolicyServer).TestIamPermissions(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _IAMPolicy_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "google.iam.v1.IAMPolicy",
|
||||
HandlerType: (*IAMPolicyServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "SetIamPolicy",
|
||||
Handler: _IAMPolicy_SetIamPolicy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetIamPolicy",
|
||||
Handler: _IAMPolicy_GetIamPolicy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "TestIamPermissions",
|
||||
Handler: _IAMPolicy_TestIamPermissions_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "google/iam/v1/iam_policy.proto",
|
||||
}
187
vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
generated
vendored
Normal file
@@ -0,0 +1,187 @@
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.12
|
||||
// source: google/iam/v1/options.proto
|
||||
|
||||
package iampb
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Encapsulates settings provided to GetIamPolicy.
|
||||
type GetPolicyOptions struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Optional. The maximum policy version that will be used to format the
|
||||
// policy.
|
||||
//
|
||||
// Valid values are 0, 1, and 3. Requests specifying an invalid value will be
|
||||
// rejected.
|
||||
//
|
||||
// Requests for policies with any conditional role bindings must specify
|
||||
// version 3. Policies with no conditional role bindings may specify any valid
|
||||
// value or leave the field unset.
|
||||
//
|
||||
// The policy in the response might use the policy version that you specified,
|
||||
// or it might use a lower policy version. For example, if you specify version
|
||||
// 3, but the policy has no conditional role bindings, the response uses
|
||||
// version 1.
|
||||
//
|
||||
// To learn which resources support conditions in their IAM policies, see the
|
||||
// [IAM
|
||||
// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
|
||||
RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"`
|
||||
}
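The comment above explains that only versions 0, 1, and 3 are accepted and that conditional role bindings require version 3. A hedged sketch of how a caller might set the option (the resource name is caller-supplied and the surrounding package name is hypothetical):

```go
package iamexample

import (
	"context"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
)

// getPolicyV3 requests the policy at version 3 so that conditional role
// bindings, if any exist, are included in the response.
func getPolicyV3(ctx context.Context, c iampb.IAMPolicyClient, resource string) (*iampb.Policy, error) {
	return c.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
		Resource: resource,
		Options:  &iampb.GetPolicyOptions{RequestedPolicyVersion: 3},
	})
}
```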
|
||||
|
||||
func (x *GetPolicyOptions) Reset() {
|
||||
*x = GetPolicyOptions{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_iam_v1_options_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetPolicyOptions) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetPolicyOptions) ProtoMessage() {}
|
||||
|
||||
func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_iam_v1_options_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead.
|
||||
func (*GetPolicyOptions) Descriptor() ([]byte, []int) {
|
||||
return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 {
|
||||
if x != nil {
|
||||
return x.RequestedPolicyVersion
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_google_iam_v1_options_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_iam_v1_options_proto_rawDesc = []byte{
|
||||
0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x4c, 0x0a, 0x10,
|
||||
0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f,
|
||||
0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c,
|
||||
0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63,
|
||||
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31,
|
||||
0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
|
||||
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69,
|
||||
0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
|
||||
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56,
|
||||
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_iam_v1_options_proto_rawDescOnce sync.Once
|
||||
file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_iam_v1_options_proto_rawDescGZIP() []byte {
|
||||
file_google_iam_v1_options_proto_rawDescOnce.Do(func() {
|
||||
file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData)
|
||||
})
|
||||
return file_google_iam_v1_options_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_iam_v1_options_proto_goTypes = []interface{}{
|
||||
(*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions
|
||||
}
|
||||
var file_google_iam_v1_options_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_iam_v1_options_proto_init() }
|
||||
func file_google_iam_v1_options_proto_init() {
|
||||
if File_google_iam_v1_options_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetPolicyOptions); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_iam_v1_options_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_iam_v1_options_proto_goTypes,
|
||||
DependencyIndexes: file_google_iam_v1_options_proto_depIdxs,
|
||||
MessageInfos: file_google_iam_v1_options_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_iam_v1_options_proto = out.File
|
||||
file_google_iam_v1_options_proto_rawDesc = nil
|
||||
file_google_iam_v1_options_proto_goTypes = nil
|
||||
file_google_iam_v1_options_proto_depIdxs = nil
|
||||
}
1169
vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
387
vendor/cloud.google.com/go/iam/iam.go
generated
vendored
Normal file
@@ -0,0 +1,387 @@
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package iam supports the resource-specific operations of Google Cloud
|
||||
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
||||
// See https://cloud.google.com/iam for more about IAM.
|
||||
//
|
||||
// Users of the Google Cloud Libraries will typically not use this package
|
||||
// directly. Instead they will begin with some resource that supports IAM, like
|
||||
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
||||
package iam
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pb "cloud.google.com/go/iam/apiv1/iampb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// client abstracts the IAMPolicy API to allow multiple implementations.
|
||||
type client interface {
|
||||
Get(ctx context.Context, resource string) (*pb.Policy, error)
|
||||
Set(ctx context.Context, resource string, p *pb.Policy) error
|
||||
Test(ctx context.Context, resource string, perms []string) ([]string, error)
|
||||
GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error)
|
||||
}
|
||||
|
||||
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
|
||||
type grpcClient struct {
|
||||
c pb.IAMPolicyClient
|
||||
}
|
||||
|
||||
var withRetry = gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60 * time.Second,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
})
|
||||
|
||||
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
|
||||
return g.GetWithVersion(ctx, resource, 1)
|
||||
}
|
||||
|
||||
func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) {
|
||||
var proto *pb.Policy
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
||||
ctx = insertMetadata(ctx, md)
|
||||
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||
var err error
|
||||
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{
|
||||
Resource: resource,
|
||||
Options: &pb.GetPolicyOptions{
|
||||
RequestedPolicyVersion: requestedPolicyVersion,
|
||||
},
|
||||
})
|
||||
return err
|
||||
}, withRetry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto, nil
|
||||
}
|
||||
|
||||
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
||||
ctx = insertMetadata(ctx, md)
|
||||
|
||||
return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
||||
Resource: resource,
|
||||
Policy: p,
|
||||
})
|
||||
return err
|
||||
}, withRetry)
|
||||
}
|
||||
|
||||
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
||||
var res *pb.TestIamPermissionsResponse
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
||||
ctx = insertMetadata(ctx, md)
|
||||
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||
var err error
|
||||
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
||||
Resource: resource,
|
||||
Permissions: perms,
|
||||
})
|
||||
return err
|
||||
}, withRetry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
}
|
||||
|
||||
// A Handle provides IAM operations for a resource.
|
||||
type Handle struct {
|
||||
c client
|
||||
resource string
|
||||
}
|
||||
|
||||
// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions).
|
||||
type Handle3 struct {
|
||||
c client
|
||||
resource string
|
||||
version int32
|
||||
}
|
||||
|
||||
// InternalNewHandle is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandle returns a Handle for resource.
|
||||
// The conn parameter refers to a server that must support the IAMPolicy service.
|
||||
func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle {
|
||||
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
|
||||
}
|
||||
|
||||
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandleGRPCClient returns a Handle for resource using the given
// gRPC service that implements IAM as a mixin.
|
||||
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
|
||||
return InternalNewHandleClient(&grpcClient{c: c}, resource)
|
||||
}
|
||||
|
||||
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandleClient returns a Handle for resource using the given
|
||||
// client implementation.
|
||||
func InternalNewHandleClient(c client, resource string) *Handle {
|
||||
return &Handle{
|
||||
c: c,
|
||||
resource: resource,
|
||||
}
|
||||
}
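The three InternalNew* constructors above are the hooks other cloud.google.com/go clients use to expose IAM on their resources. A hedged sketch of how a library might wire this up (the Client type, its conn field, and the package name are hypothetical; conn stands for an already-dialed connection to a service that serves the IAMPolicy mixin):

```go
package exampleclient

import (
	"cloud.google.com/go/iam"
	"google.golang.org/grpc"
)

// Client is a hypothetical Google Cloud client wrapping a dialed connection.
type Client struct {
	conn *grpc.ClientConn
}

// IAM returns an iam.Handle for the named resource, mirroring how real
// clients (for example pubsub topics) expose an IAM() accessor.
func (c *Client) IAM(resourceName string) *iam.Handle {
	return iam.InternalNewHandle(c.conn, resourceName)
}
```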
|
||||
|
||||
// V3 returns a Handle3, which is like Handle except it sets
|
||||
// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3
|
||||
// when storing a policy.
|
||||
func (h *Handle) V3() *Handle3 {
|
||||
return &Handle3{
|
||||
c: h.c,
|
||||
resource: h.resource,
|
||||
version: 3,
|
||||
}
|
||||
}
|
||||
|
||||
// Policy retrieves the IAM policy for the resource.
|
||||
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
||||
proto, err := h.c.Get(ctx, h.resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Policy{InternalProto: proto}, nil
|
||||
}
|
||||
|
||||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
||||
//
|
||||
// If policy was created from a prior call to Get, then the modification will
|
||||
// only succeed if the policy has not changed since the Get.
|
||||
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
|
||||
return h.c.Set(ctx, h.resource, policy.InternalProto)
|
||||
}
|
||||
|
||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
||||
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
||||
return h.c.Test(ctx, h.resource, permissions)
|
||||
}
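Policy, SetPolicy, and TestPermissions together support the usual read-modify-write cycle. A minimal sketch, assuming h came from some resource's IAM() accessor and member is a string such as "user:alice@example.com":

```go
package exampleclient

import (
	"context"

	"cloud.google.com/go/iam"
)

// grantViewer adds member to roles/viewer: fetch the policy, mutate it
// locally, then write it back. SetPolicy fails if the policy changed on the
// server after it was read.
func grantViewer(ctx context.Context, h *iam.Handle, member string) error {
	policy, err := h.Policy(ctx)
	if err != nil {
		return err
	}
	policy.Add(member, iam.Viewer)
	return h.SetPolicy(ctx, policy)
}
```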
|
||||
|
||||
// A RoleName is a name representing a collection of permissions.
|
||||
type RoleName string
|
||||
|
||||
// Common role names.
|
||||
const (
|
||||
Owner RoleName = "roles/owner"
|
||||
Editor RoleName = "roles/editor"
|
||||
Viewer RoleName = "roles/viewer"
|
||||
)
|
||||
|
||||
const (
|
||||
// AllUsers is a special member that denotes all users, even unauthenticated ones.
|
||||
AllUsers = "allUsers"
|
||||
|
||||
// AllAuthenticatedUsers is a special member that denotes all authenticated users.
|
||||
AllAuthenticatedUsers = "allAuthenticatedUsers"
|
||||
)
|
||||
|
||||
// A Policy is a list of Bindings representing roles
|
||||
// granted to members.
|
||||
//
|
||||
// The zero Policy is a valid policy with no bindings.
|
||||
type Policy struct {
|
||||
// TODO(jba): when type aliases are available, put Policy into an internal package
|
||||
// and provide an exported alias here.
|
||||
|
||||
// This field is exported for use by the Google Cloud Libraries only.
|
||||
// It may become unexported in a future release.
|
||||
InternalProto *pb.Policy
|
||||
}
|
||||
|
||||
// Members returns the list of members with the supplied role.
|
||||
// The return value should not be modified. Use Add and Remove
|
||||
// to modify the members of a role.
|
||||
func (p *Policy) Members(r RoleName) []string {
|
||||
b := p.binding(r)
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return b.Members
|
||||
}
|
||||
|
||||
// HasRole reports whether member has role r.
|
||||
func (p *Policy) HasRole(member string, r RoleName) bool {
|
||||
return memberIndex(member, p.binding(r)) >= 0
|
||||
}
|
||||
|
||||
// Add adds member to role r if it is not already present.
|
||||
// A new binding is created if there is no binding for the role.
|
||||
func (p *Policy) Add(member string, r RoleName) {
|
||||
b := p.binding(r)
|
||||
if b == nil {
|
||||
if p.InternalProto == nil {
|
||||
p.InternalProto = &pb.Policy{}
|
||||
}
|
||||
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
|
||||
Role: string(r),
|
||||
Members: []string{member},
|
||||
})
|
||||
return
|
||||
}
|
||||
if memberIndex(member, b) < 0 {
|
||||
b.Members = append(b.Members, member)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Remove removes member from role r if it is present.
|
||||
func (p *Policy) Remove(member string, r RoleName) {
|
||||
bi := p.bindingIndex(r)
|
||||
if bi < 0 {
|
||||
return
|
||||
}
|
||||
bindings := p.InternalProto.Bindings
|
||||
b := bindings[bi]
|
||||
mi := memberIndex(member, b)
|
||||
if mi < 0 {
|
||||
return
|
||||
}
|
||||
// Order doesn't matter for bindings or members, so to remove, move the last item
|
||||
// into the removed spot and shrink the slice.
|
||||
if len(b.Members) == 1 {
|
||||
// Remove binding.
|
||||
last := len(bindings) - 1
|
||||
bindings[bi] = bindings[last]
|
||||
bindings[last] = nil
|
||||
p.InternalProto.Bindings = bindings[:last]
|
||||
return
|
||||
}
|
||||
// Remove member.
|
||||
// TODO(jba): worry about multiple copies of m?
|
||||
last := len(b.Members) - 1
|
||||
b.Members[mi] = b.Members[last]
|
||||
b.Members[last] = ""
|
||||
b.Members = b.Members[:last]
|
||||
}
|
||||
|
||||
// Roles returns the names of all the roles that appear in the Policy.
|
||||
func (p *Policy) Roles() []RoleName {
|
||||
if p.InternalProto == nil {
|
||||
return nil
|
||||
}
|
||||
var rns []RoleName
|
||||
for _, b := range p.InternalProto.Bindings {
|
||||
rns = append(rns, RoleName(b.Role))
|
||||
}
|
||||
return rns
|
||||
}
|
||||
|
||||
// binding returns the Binding for the supplied role, or nil if there isn't one.
|
||||
func (p *Policy) binding(r RoleName) *pb.Binding {
|
||||
i := p.bindingIndex(r)
|
||||
if i < 0 {
|
||||
return nil
|
||||
}
|
||||
return p.InternalProto.Bindings[i]
|
||||
}
|
||||
|
||||
func (p *Policy) bindingIndex(r RoleName) int {
|
||||
if p.InternalProto == nil {
|
||||
return -1
|
||||
}
|
||||
for i, b := range p.InternalProto.Bindings {
|
||||
if b.Role == string(r) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
||||
func memberIndex(m string, b *pb.Binding) int {
|
||||
if b == nil {
|
||||
return -1
|
||||
}
|
||||
for i, mm := range b.Members {
|
||||
if mm == m {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// insertMetadata inserts metadata into the given context
|
||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
out, _ := metadata.FromOutgoingContext(ctx)
|
||||
out = out.Copy()
|
||||
for _, md := range mds {
|
||||
for k, v := range md {
|
||||
out[k] = append(out[k], v...)
|
||||
}
|
||||
}
|
||||
return metadata.NewOutgoingContext(ctx, out)
|
||||
}
|
||||
|
||||
// A Policy3 is a list of Bindings representing roles granted to members.
|
||||
//
|
||||
// The zero Policy3 is a valid policy with no bindings.
|
||||
//
|
||||
// It is similar to a Policy, except a Policy3 provides direct access to the
|
||||
// list of Bindings.
|
||||
//
|
||||
// The policy version is always set to 3.
|
||||
type Policy3 struct {
|
||||
etag []byte
|
||||
Bindings []*pb.Binding
|
||||
}
|
||||
|
||||
// Policy retrieves the IAM policy for the resource.
|
||||
//
|
||||
// requestedPolicyVersion is always set to 3.
|
||||
func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) {
|
||||
proto, err := h.c.GetWithVersion(ctx, h.resource, h.version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Policy3{
|
||||
Bindings: proto.Bindings,
|
||||
etag: proto.Etag,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
||||
//
|
||||
// If policy was created from a prior call to Get, then the modification will
|
||||
// only succeed if the policy has not changed since the Get.
|
||||
func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error {
|
||||
return h.c.Set(ctx, h.resource, &pb.Policy{
|
||||
Bindings: policy.Bindings,
|
||||
Etag: policy.etag,
|
||||
Version: h.version,
|
||||
})
|
||||
}
|
||||
|
||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
||||
func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
||||
return h.c.Test(ctx, h.resource, permissions)
|
||||
}
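A sketch of the version-3 flow, which edits Bindings directly. The role, the member value, and the google.golang.org/genproto expr import used for the condition are assumptions for illustration only:

```go
package exampleclient

import (
	"context"

	"cloud.google.com/go/iam"
	iampb "cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/genproto/googleapis/type/expr"
)

// addExpiringViewer appends a conditional binding, which requires the
// version-3 policy surface exposed by Handle.V3.
func addExpiringViewer(ctx context.Context, h *iam.Handle, member string) error {
	h3 := h.V3()
	p, err := h3.Policy(ctx)
	if err != nil {
		return err
	}
	p.Bindings = append(p.Bindings, &iampb.Binding{
		Role:    "roles/viewer",
		Members: []string{member},
		Condition: &expr.Expr{
			Title:      "expires-2030",
			Expression: `request.time < timestamp("2030-01-01T00:00:00Z")`,
		},
	})
	return h3.SetPolicy(ctx, p)
}
```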
2009
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
Normal file
File diff suppressed because it is too large
43
vendor/cloud.google.com/go/internal/README.md
generated
vendored
Normal file
@@ -0,0 +1,43 @@
# Internal

This directory contains internal code for cloud.google.com/go packages.

## .repo-metadata-full.json

`.repo-metadata-full.json` contains metadata about the packages in this repo. It
is generated by `internal/gapicgen/generator`. It's processed by external tools
to build lists of all of the packages.

Don't make breaking changes to the format without consulting with the external
tools.

One day, we may want to create individual `.repo-metadata.json` files next to
each package, which is the pattern followed by some other languages. External
tools would then talk to pkg.go.dev or some other service to get the overall
list of packages and use the `.repo-metadata.json` files to get the additional
metadata required. For now, `.repo-metadata-full.json` includes everything.

## cloudbuild.yaml

To kick off a build locally run from the repo root:

```bash
gcloud builds submit --project=cloud-devrel-kokoro-resources --config=internal/cloudbuild.yaml
```

### Updating OwlBot SHA

You may want to manually update which version of the post processor will be
used -- to do this you need to update the SHA in the OwlBot lock file. Start by
running the following commands:

```bash
docker pull gcr.io/cloud-devrel-public-resources/owlbot-go:latest
docker inspect --format='{{index .RepoDigests 0}}' gcr.io/cloud-devrel-public-resources/owlbot-go:latest
```

This will give you a SHA. You can use this value to update the value in
`.github/.OwlBot.lock.yaml`.

*Note*: OwlBot will eventually open a pull request to update this value if it
discovers a new version of the container.
55
vendor/cloud.google.com/go/internal/annotate.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Annotate prepends msg to the error message in err, attempting
|
||||
// to preserve other information in err, like an error code.
|
||||
//
|
||||
// Annotate panics if err is nil.
|
||||
//
|
||||
// Annotate knows about these error types:
|
||||
// - "google.golang.org/grpc/status".Status
|
||||
// - "google.golang.org/api/googleapi".Error
|
||||
// If the error is not one of these types, Annotate behaves
|
||||
// like
|
||||
//
|
||||
// fmt.Errorf("%s: %v", msg, err)
|
||||
func Annotate(err error, msg string) error {
|
||||
if err == nil {
|
||||
panic("Annotate called with nil")
|
||||
}
|
||||
if s, ok := status.FromError(err); ok {
|
||||
p := s.Proto()
|
||||
p.Message = msg + ": " + p.Message
|
||||
return status.ErrorProto(p)
|
||||
}
|
||||
if g, ok := err.(*googleapi.Error); ok {
|
||||
g.Message = msg + ": " + g.Message
|
||||
return g
|
||||
}
|
||||
return fmt.Errorf("%s: %v", msg, err)
|
||||
}
|
||||
|
||||
// Annotatef uses format and args to format a string, then calls Annotate.
|
||||
func Annotatef(err error, format string, args ...interface{}) error {
|
||||
return Annotate(err, fmt.Sprintf(format, args...))
|
||||
}
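A small illustration of what Annotate preserves when the wrapped error carries a gRPC status. The error values are made up, and because the package is internal this only compiles from within the cloud.google.com/go module:

```go
package internal_test

import (
	"fmt"

	"cloud.google.com/go/internal"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ExampleAnnotate shows that the status code survives while the message is prefixed.
func ExampleAnnotate() {
	orig := status.Error(codes.NotFound, "bucket missing")
	wrapped := internal.Annotate(orig, "fetching attrs")

	s, _ := status.FromError(wrapped)
	fmt.Println(s.Code(), "-", s.Message())
	// Output: NotFound - fetching attrs: bucket missing
}
```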
25
vendor/cloud.google.com/go/internal/cloudbuild.yaml
generated
vendored
Normal file
@@ -0,0 +1,25 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# note: /workspace is a special directory in the docker image where all the files in this folder
# get placed on your behalf

timeout: 7200s # 2 hours
steps:
- name: gcr.io/cloud-builders/docker
  args: ['build', '-t', 'gcr.io/cloud-devrel-public-resources/owlbot-go', '-f', 'postprocessor/Dockerfile', '.']
  dir: internal

images:
- gcr.io/cloud-devrel-public-resources/owlbot-go:latest
108
vendor/cloud.google.com/go/internal/optional/optional.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package optional provides versions of primitive types that can
|
||||
// be nil. These are useful in methods that update some of an API object's
|
||||
// fields.
|
||||
package optional
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type (
|
||||
// Bool is either a bool or nil.
|
||||
Bool interface{}
|
||||
|
||||
// String is either a string or nil.
|
||||
String interface{}
|
||||
|
||||
// Int is either an int or nil.
|
||||
Int interface{}
|
||||
|
||||
// Uint is either a uint or nil.
|
||||
Uint interface{}
|
||||
|
||||
// Float64 is either a float64 or nil.
|
||||
Float64 interface{}
|
||||
|
||||
// Duration is either a time.Duration or nil.
|
||||
Duration interface{}
|
||||
)
|
||||
|
||||
// ToBool returns its argument as a bool.
|
||||
// It panics if its argument is nil or not a bool.
|
||||
func ToBool(v Bool) bool {
|
||||
x, ok := v.(bool)
|
||||
if !ok {
|
||||
doPanic("Bool", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToString returns its argument as a string.
|
||||
// It panics if its argument is nil or not a string.
|
||||
func ToString(v String) string {
|
||||
x, ok := v.(string)
|
||||
if !ok {
|
||||
doPanic("String", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToInt returns its argument as an int.
|
||||
// It panics if its argument is nil or not an int.
|
||||
func ToInt(v Int) int {
|
||||
x, ok := v.(int)
|
||||
if !ok {
|
||||
doPanic("Int", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToUint returns its argument as a uint.
|
||||
// It panics if its argument is nil or not a uint.
|
||||
func ToUint(v Uint) uint {
|
||||
x, ok := v.(uint)
|
||||
if !ok {
|
||||
doPanic("Uint", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToFloat64 returns its argument as a float64.
|
||||
// It panics if its argument is nil or not a float64.
|
||||
func ToFloat64(v Float64) float64 {
|
||||
x, ok := v.(float64)
|
||||
if !ok {
|
||||
doPanic("Float64", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToDuration returns its argument as a time.Duration.
|
||||
// It panics if its argument is nil or not a time.Duration.
|
||||
func ToDuration(v Duration) time.Duration {
|
||||
x, ok := v.(time.Duration)
|
||||
if !ok {
|
||||
doPanic("Duration", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func doPanic(capType string, v interface{}) {
|
||||
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
|
||||
}
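A sketch of the package's intended use: an update-style struct whose nil fields mean "leave unchanged" and whose non-nil fields carry the new value. BucketAttrsToUpdate below is hypothetical, not a real API type:

```go
package optional_test

import (
	"fmt"

	"cloud.google.com/go/internal/optional"
)

// BucketAttrsToUpdate is a hypothetical update struct; nil means "unchanged".
type BucketAttrsToUpdate struct {
	VersioningEnabled optional.Bool
}

func apply(u BucketAttrsToUpdate) {
	if u.VersioningEnabled != nil {
		fmt.Println("set versioning to", optional.ToBool(u.VersioningEnabled))
	}
}

func Example() {
	apply(BucketAttrsToUpdate{})                        // prints nothing: field was not set
	apply(BucketAttrsToUpdate{VersioningEnabled: true}) // set versioning to true
}
```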
85
vendor/cloud.google.com/go/internal/retry.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Retry calls the supplied function f repeatedly according to the provided
|
||||
// backoff parameters. It returns when one of the following occurs:
|
||||
// When f's first return value is true, Retry immediately returns with f's second
|
||||
// return value.
|
||||
// When the provided context is done, Retry returns with an error that
|
||||
// includes both ctx.Error() and the last error returned by f.
|
||||
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
|
||||
return retry(ctx, bo, f, gax.Sleep)
|
||||
}
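A hedged sketch of how a caller inside the module might drive Retry; poll and the backoff values are illustrative, not the library's defaults:

```go
package internal_test

import (
	"context"
	"time"

	"cloud.google.com/go/internal"
	gax "github.com/googleapis/gax-go/v2"
)

// waitUntilDone keeps invoking poll with exponential backoff until it reports
// done, returns a hard error, or the context is cancelled.
func waitUntilDone(ctx context.Context, poll func() (done bool, err error)) error {
	bo := gax.Backoff{
		Initial:    time.Second,
		Max:        30 * time.Second,
		Multiplier: 2,
	}
	return internal.Retry(ctx, bo, func() (bool, error) {
		done, err := poll()
		if err != nil {
			return true, err // stop retrying on a hard error
		}
		return done, nil // stop when done, otherwise back off and retry
	})
}
```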
|
||||
|
||||
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
|
||||
sleep func(context.Context, time.Duration) error) error {
|
||||
var lastErr error
|
||||
for {
|
||||
stop, err := f()
|
||||
if stop {
|
||||
return err
|
||||
}
|
||||
// Remember the last "real" error from f.
|
||||
if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
|
||||
lastErr = err
|
||||
}
|
||||
p := bo.Pause()
|
||||
if ctxErr := sleep(ctx, p); ctxErr != nil {
|
||||
if lastErr != nil {
|
||||
return wrappedCallErr{ctxErr: ctxErr, wrappedErr: lastErr}
|
||||
}
|
||||
return ctxErr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Use this error type to return an error which allows introspection of both
|
||||
// the context error and the error from the service.
|
||||
type wrappedCallErr struct {
|
||||
ctxErr error
|
||||
wrappedErr error
|
||||
}
|
||||
|
||||
func (e wrappedCallErr) Error() string {
|
||||
return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr)
|
||||
}
|
||||
|
||||
func (e wrappedCallErr) Unwrap() error {
|
||||
return e.wrappedErr
|
||||
}
|
||||
|
||||
// Is allows errors.Is to match the error from the call as well as context
|
||||
// sentinel errors.
|
||||
func (e wrappedCallErr) Is(err error) bool {
|
||||
return e.ctxErr == err || e.wrappedErr == err
|
||||
}
|
||||
|
||||
// GRPCStatus allows the wrapped error to be used with status.FromError.
|
||||
func (e wrappedCallErr) GRPCStatus() *status.Status {
|
||||
if s, ok := status.FromError(e.wrappedErr); ok {
|
||||
return s
|
||||
}
|
||||
return nil
|
||||
}
111
vendor/cloud.google.com/go/internal/trace/trace.go
generated
vendored
Normal file
@@ -0,0 +1,111 @@
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/xerrors"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/genproto/googleapis/rpc/code"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// StartSpan adds a span to the trace with the given name.
|
||||
func StartSpan(ctx context.Context, name string) context.Context {
|
||||
ctx, _ = trace.StartSpan(ctx, name)
|
||||
return ctx
|
||||
}
|
||||
|
||||
// EndSpan ends a span with the given error.
|
||||
func EndSpan(ctx context.Context, err error) {
|
||||
span := trace.FromContext(ctx)
|
||||
if err != nil {
|
||||
span.SetStatus(toStatus(err))
|
||||
}
|
||||
span.End()
|
||||
}
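The typical pattern is to start a span, defer EndSpan with the named return error, and pass the derived context down. A sketch with an illustrative span name; the package is internal, so this shape is only usable inside cloud.google.com/go:

```go
package trace_test

import (
	"context"

	"cloud.google.com/go/internal/trace"
)

// fetchWithSpan brackets a call in a span; the error recorded by EndSpan is
// whatever the wrapped function returned.
func fetchWithSpan(ctx context.Context, fetch func(context.Context) error) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/example.Fetch")
	defer func() { trace.EndSpan(ctx, err) }()
	return fetch(ctx)
}
```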
|
||||
|
||||
// toStatus interrogates an error and converts it to an appropriate
|
||||
// OpenCensus status.
|
||||
func toStatus(err error) trace.Status {
|
||||
var err2 *googleapi.Error
|
||||
if ok := xerrors.As(err, &err2); ok {
|
||||
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
|
||||
} else if s, ok := status.FromError(err); ok {
|
||||
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
|
||||
} else {
|
||||
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(deklerk): switch to using OpenCensus function when it becomes available.
|
||||
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
|
||||
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
|
||||
switch httpStatusCode {
|
||||
case 200:
|
||||
return int32(code.Code_OK)
|
||||
case 499:
|
||||
return int32(code.Code_CANCELLED)
|
||||
case 500:
|
||||
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
|
||||
case 400:
|
||||
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
|
||||
case 504:
|
||||
return int32(code.Code_DEADLINE_EXCEEDED)
|
||||
case 404:
|
||||
return int32(code.Code_NOT_FOUND)
|
||||
case 409:
|
||||
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
|
||||
case 403:
|
||||
return int32(code.Code_PERMISSION_DENIED)
|
||||
case 401:
|
||||
return int32(code.Code_UNAUTHENTICATED)
|
||||
case 429:
|
||||
return int32(code.Code_RESOURCE_EXHAUSTED)
|
||||
case 501:
|
||||
return int32(code.Code_UNIMPLEMENTED)
|
||||
case 503:
|
||||
return int32(code.Code_UNAVAILABLE)
|
||||
default:
|
||||
return int32(code.Code_UNKNOWN)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: (odeke-em): perhaps just pass around spans due to the cost
|
||||
// incurred from using trace.FromContext(ctx) yet we could avoid
|
||||
// throwing away the work done by ctx, span := trace.StartSpan.
|
||||
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
|
||||
var attrs []trace.Attribute
|
||||
for k, v := range attrMap {
|
||||
var a trace.Attribute
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
a = trace.StringAttribute(k, v)
|
||||
case bool:
|
||||
a = trace.BoolAttribute(k, v)
|
||||
case int:
|
||||
a = trace.Int64Attribute(k, int64(v))
|
||||
case int64:
|
||||
a = trace.Int64Attribute(k, v)
|
||||
default:
|
||||
a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
|
||||
}
|
||||
attrs = append(attrs, a)
|
||||
}
|
||||
trace.FromContext(ctx).Annotatef(attrs, format, args...)
|
||||
}
|
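These helpers are used throughout this vendored module (see, for example, storage/acl.go later in this diff). A minimal sketch of the pattern, assuming code that lives inside the cloud.google.com/go module (the trace package is internal, so it cannot be imported from outside) and a hypothetical doWork function:

```go
package storage // hypothetical in-module example

import (
	"context"

	"cloud.google.com/go/internal/trace"
)

func doWork(ctx context.Context) (err error) {
	// Start a span and ensure it is ended with whatever error doWork returns.
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Example.doWork")
	defer func() { trace.EndSpan(ctx, err) }()

	// Attach structured attributes to the span while the work runs.
	trace.TracePrintf(ctx, map[string]interface{}{"attempt": int64(1)}, "starting work")
	return nil
}
```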
19  vendor/cloud.google.com/go/internal/version/update_version.sh  generated vendored Normal file
@@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

today=$(date +%Y%m%d)

sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
71  vendor/cloud.google.com/go/internal/version/version.go  generated vendored Normal file
@@ -0,0 +1,71 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate ./update_version.sh

// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version

import (
	"runtime"
	"strings"
	"unicode"
)

// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20201104"

// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
	return goVersion
}

var goVersion = goVer(runtime.Version())

const develPrefix = "devel +"

func goVer(s string) string {
	if strings.HasPrefix(s, develPrefix) {
		s = s[len(develPrefix):]
		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
			s = s[:p]
		}
		return s
	}

	if strings.HasPrefix(s, "go1") {
		s = s[2:]
		var prerelease string
		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
			s, prerelease = s[:p], s[p:]
		}
		if strings.HasSuffix(s, ".") {
			s += "0"
		} else if strings.Count(s, ".") < 2 {
			s += ".0"
		}
		if prerelease != "" {
			s += "-" + prerelease
		}
		return s
	}
	return ""
}

func notSemverRune(r rune) bool {
	return !strings.ContainsRune("0123456789.", r)
}
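A sketch of goVer's normalization, written as a hypothetical in-package test (not part of the vendored file) to make the rules above concrete:

```go
package version

import "testing"

func TestGoVer(t *testing.T) {
	cases := map[string]string{
		"go1.20":                  "1.20.0",     // minor-only versions gain a ".0" patch
		"go1.20.3":                "1.20.3",     // full semver versions pass through unchanged
		"go1.21rc2":               "1.21.0-rc2", // pre-release suffixes are moved behind a dash
		"devel +abc123 Tue Jan 1": "abc123",     // devel builds report the token after "devel +"
	}
	for in, want := range cases {
		if got := goVer(in); got != want {
			t.Errorf("goVer(%q) = %q, want %q", in, got, want)
		}
	}
}
```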
366  vendor/cloud.google.com/go/storage/CHANGES.md  generated vendored Normal file
@@ -0,0 +1,366 @@
# Changes
|
||||
|
||||
|
||||
## [1.30.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.0...storage/v1.30.1) (2023-03-21)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Retract versions with Copier bug ([#7583](https://github.com/googleapis/google-cloud-go/issues/7583)) ([9c10b6f](https://github.com/googleapis/google-cloud-go/commit/9c10b6f8a54cb8447260148b5e4a9b5160281020))
|
||||
* Versions v1.25.0-v1.27.0 are retracted due to [#6857](https://github.com/googleapis/google-cloud-go/issues/6857).
|
||||
* **storage:** SignedURL v4 allows headers with colons in value ([#7603](https://github.com/googleapis/google-cloud-go/issues/7603)) ([6b50f9b](https://github.com/googleapis/google-cloud-go/commit/6b50f9b368f5b271ade1706c342865cef46712e6))
|
||||
|
||||
## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.29.0...storage/v1.30.0) (2023-03-15)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Update routing annotation for CreateBucketRequest docs: Add support for end-to-end checksumming in the gRPC WriteObject flow feat!: BREAKING CHANGE - renaming Notification to NotificationConfig ([2fef56f](https://github.com/googleapis/google-cloud-go/commit/2fef56f75a63dc4ff6e0eea56c7b26d4831c8e27))
|
||||
* **storage:** Json downloads ([#7158](https://github.com/googleapis/google-cloud-go/issues/7158)) ([574a86c](https://github.com/googleapis/google-cloud-go/commit/574a86c614445f8c3f5a54446820df774c31cd47))
|
||||
* **storage:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Specify credentials with STORAGE_EMULATOR_HOST ([#7271](https://github.com/googleapis/google-cloud-go/issues/7271)) ([940ae15](https://github.com/googleapis/google-cloud-go/commit/940ae15f725ff384e345e627feb03d22e1fd8db5))
|
||||
|
||||
## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.1...storage/v1.29.0) (2023-01-19)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** Add ComponentCount as part of ObjectAttrs ([#7230](https://github.com/googleapis/google-cloud-go/issues/7230)) ([a19bca6](https://github.com/googleapis/google-cloud-go/commit/a19bca60704b4fbb674cf51d828580aa653c8210))
|
||||
* **storage:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0))
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage/internal:** Corrected typos and spellings ([7357077](https://github.com/googleapis/google-cloud-go/commit/735707796d81d7f6f32fc3415800c512fe62297e))
|
||||
|
||||
## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5))
|
||||
|
||||
## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1))
|
||||
* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb))
|
||||
* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857)
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3))
|
||||
|
||||
## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8))
|
||||
|
||||
## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e))
|
||||
|
||||
## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f))
|
||||
* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e))
|
||||
|
||||
## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d))
|
||||
|
||||
## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d))
|
||||
* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473))
|
||||
|
||||
### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07))
|
||||
* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) ([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973))
|
||||
|
||||
## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) ([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072))
|
||||
* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341))
|
||||
|
||||
## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749)
|
||||
* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e))
|
||||
|
||||
## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#5447](https://www.github.com/googleapis/google-cloud-go/issues/5447)) ([7d175ef](https://www.github.com/googleapis/google-cloud-go/commit/7d175ef12b7b3e75585427f5dd2aab4a175e92d6))
|
||||
|
||||
## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314))
|
||||
* This release contains changes to fully align this library's retry strategy
|
||||
with best practices as described in the
|
||||
Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy).
|
||||
* The library will now retry only idempotent operations by default. This means
|
||||
that for certain operations, including object upload, compose, rewrite,
|
||||
update, and delete, requests will not be retried by default unless
|
||||
[idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency)
|
||||
for the request have been met.
|
||||
* The library now has methods to configure aspects of retry policy for
|
||||
API calls, including which errors are retried, the timing of the
|
||||
exponential backoff, and how idempotency is taken into account.
|
||||
* If you wish to re-enable retries for a non-idempotent request, use the
|
||||
[RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways)
|
||||
policy.
|
||||
* For full details on how to configure retries, see the
|
||||
[package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests)
|
||||
and the
|
||||
[Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy)
|
||||
* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8))
|
||||
* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c))
|
||||
* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0))
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264))
|
||||
|
||||
### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** upgrade genproto ([#4993](https://www.github.com/googleapis/google-cloud-go/issues/4993)) ([5ca462d](https://www.github.com/googleapis/google-cloud-go/commit/5ca462d99fe851b7cddfd70108798e2fa959bdfd)), refs [#4991](https://www.github.com/googleapis/google-cloud-go/issues/4991)
|
||||
|
||||
### [1.18.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.0...storage/v1.18.1) (2021-10-14)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** don't assume auth from a client option ([#4982](https://www.github.com/googleapis/google-cloud-go/issues/4982)) ([e17334d](https://www.github.com/googleapis/google-cloud-go/commit/e17334d1fe7645d89d14ae7148313498b984dfbb))
|
||||
|
||||
## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.17.0...storage/v1.18.0) (2021-10-11)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** returned wrapped error for timeouts ([#4802](https://www.github.com/googleapis/google-cloud-go/issues/4802)) ([0e102a3](https://www.github.com/googleapis/google-cloud-go/commit/0e102a385dc67a06f6b444b3a93e6998428529be)), refs [#4197](https://www.github.com/googleapis/google-cloud-go/issues/4197)
|
||||
* **storage:** SignedUrl can use existing creds to authenticate ([#4604](https://www.github.com/googleapis/google-cloud-go/issues/4604)) ([b824c89](https://www.github.com/googleapis/google-cloud-go/commit/b824c897e6941270747b612f6d36a8d6ae081315))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** update PAP to use inherited instead of unspecified ([#4909](https://www.github.com/googleapis/google-cloud-go/issues/4909)) ([dac26b1](https://www.github.com/googleapis/google-cloud-go/commit/dac26b1af2f2972f12775341173bcc5f982438b8))
|
||||
|
||||
## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.1...storage/v1.17.0) (2021-09-28)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** add projectNumber field to bucketAttrs. ([#4805](https://www.github.com/googleapis/google-cloud-go/issues/4805)) ([07343af](https://www.github.com/googleapis/google-cloud-go/commit/07343afc15085b164cc41d202d13f9d46f5c0d02))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** align retry idempotency (part 1) ([#4715](https://www.github.com/googleapis/google-cloud-go/issues/4715)) ([ffa903e](https://www.github.com/googleapis/google-cloud-go/commit/ffa903eeec61aa3869e5220e2f09371127b5c393))
|
||||
|
||||
### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
|
||||
* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd))
|
||||
* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01))
|
||||
* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1))
|
||||
* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437)
|
||||
* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470)
|
||||
|
||||
## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** support PublicAccessPrevention ([#3608](https://www.github.com/googleapis/google-cloud-go/issues/3608)) ([99bc782](https://www.github.com/googleapis/google-cloud-go/commit/99bc782fb50a47602b45278384ef5d5b5da9263b)), refs [#3203](https://www.github.com/googleapis/google-cloud-go/issues/3203)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** fix Writer.ChunkSize validation ([#4255](https://www.github.com/googleapis/google-cloud-go/issues/4255)) ([69c2e9d](https://www.github.com/googleapis/google-cloud-go/commit/69c2e9dc6303e1a004d3104a8178532fa738e742)), refs [#4167](https://www.github.com/googleapis/google-cloud-go/issues/4167)
|
||||
* **storage:** try to reopen for failed Reads ([#4226](https://www.github.com/googleapis/google-cloud-go/issues/4226)) ([564102b](https://www.github.com/googleapis/google-cloud-go/commit/564102b335dbfb558bec8af883e5f898efb5dd10)), refs [#3040](https://www.github.com/googleapis/google-cloud-go/issues/3040)
|
||||
|
||||
## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.13.0...storage/v1.15.0) (2021-04-21)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **transport** Bump dependency on google.golang.org/api to pick up HTTP/2
|
||||
config updates (see [googleapis/google-api-go-client#882](https://github.com/googleapis/google-api-go-client/pull/882)).
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** retry io.ErrUnexpectedEOF ([#3957](https://www.github.com/googleapis/google-cloud-go/issues/3957)) ([f6590cd](https://www.github.com/googleapis/google-cloud-go/commit/f6590cdc26c8479be5df48949fa59f879e0c24fc))
|
||||
|
||||
|
||||
## v1.14.0
|
||||
|
||||
- Updates to various dependencies.
|
||||
|
||||
## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0))
|
||||
* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537))
|
||||
|
||||
## v1.12.0
|
||||
- V4 signed URL fixes:
|
||||
- Fix encoding of spaces in query parameters.
|
||||
- Add fields that were missing from PostPolicyV4 policy conditions.
|
||||
- Fix Query to correctly list prefixes as well as objects when SetAttrSelection
|
||||
is used.
|
||||
|
||||
## v1.11.0
|
||||
- Add support for CustomTime and NoncurrentTime object lifecycle management
|
||||
features.
|
||||
|
||||
## v1.10.0
|
||||
- Bump dependency on google.golang.org/api to capture changes to retry logic
|
||||
which will make retries on writes more resilient.
|
||||
- Improve documentation for Writer.ChunkSize.
|
||||
- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket.
|
||||
|
||||
## v1.9.0
|
||||
- Add retry for transient network errors on most operations (with the exception
|
||||
of writes).
|
||||
- Bump dependency for google.golang.org/api to capture a change in the default
|
||||
HTTP transport which will improve performance for reads under heavy load.
|
||||
- Add CRC32C checksum validation option to Composer.
|
||||
|
||||
## v1.8.0
|
||||
- Add support for V4 signed post policies.
|
||||
|
||||
## v1.7.0
|
||||
- V4 signed URL support:
|
||||
- Add support for bucket-bound domains and virtual hosted style URLs.
|
||||
- Add support for query parameters in the signature.
|
||||
- Fix text encoding to align with standards.
|
||||
- Add the object name to query parameters for write calls.
|
||||
- Fix retry behavior when reading files with Content-Encoding gzip.
|
||||
- Fix response header in reader.
|
||||
- New code examples:
|
||||
- Error handling for `ObjectHandle` preconditions.
|
||||
- Existence checks for buckets and objects.
|
||||
|
||||
## v1.6.0
|
||||
|
||||
- Updated option handling:
|
||||
- Don't drop custom scopes (#1756)
|
||||
- Don't drop port in provided endpoint (#1737)
|
||||
|
||||
## v1.5.0
|
||||
|
||||
- Honor WithEndpoint client option for reads as well as writes.
|
||||
- Add archive storage class to docs.
|
||||
- Make fixes to storage benchwrapper.
|
||||
|
||||
## v1.4.0
|
||||
|
||||
- When listing objects in a bucket, allow callers to specify which attributes
|
||||
are queried. This allows for performance optimization.
|
||||
|
||||
## v1.3.0
|
||||
|
||||
- Use `storage.googleapis.com/storage/v1` by default for GCS requests
|
||||
instead of `www.googleapis.com/storage/v1`.
|
||||
|
||||
## v1.2.1
|
||||
|
||||
- Fixed a bug where UniformBucketLevelAccess and BucketPolicyOnly were not
|
||||
being sent in all cases.
|
||||
|
||||
## v1.2.0
|
||||
|
||||
- Add support for UniformBucketLevelAccess. This configures access checks
|
||||
to use only bucket-level IAM policies.
|
||||
See: https://godoc.org/cloud.google.com/go/storage#UniformBucketLevelAccess.
|
||||
- Fix userAgent to use correct version.
|
||||
|
||||
## v1.1.2
|
||||
|
||||
- Fix memory leak in BucketIterator and ObjectIterator.
|
||||
|
||||
## v1.1.1
|
||||
|
||||
- Send BucketPolicyOnly even when it's disabled.
|
||||
|
||||
## v1.1.0
|
||||
|
||||
- Performance improvements for ObjectIterator and BucketIterator.
|
||||
- Fix Bucket.ObjectIterator size calculation checks.
|
||||
- Added HMACKeyOptions to all the methods which allows for options such as
|
||||
UserProject to be set per invocation and optionally be used.
|
||||
|
||||
## v1.0.0
|
||||
|
||||
This is the first tag to carve out storage as its own module. See:
|
||||
https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
|
202  vendor/cloud.google.com/go/storage/LICENSE  generated vendored Normal file
@@ -0,0 +1,202 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
32  vendor/cloud.google.com/go/storage/README.md  generated vendored Normal file
@@ -0,0 +1,32 @@
## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage)

- [About Cloud Storage](https://cloud.google.com/storage/)
- [API documentation](https://cloud.google.com/storage/docs)
- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage)

### Example Usage

First create a `storage.Client` to use throughout your application:

[snip]:# (storage-1)
```go
client, err := storage.NewClient(ctx)
if err != nil {
	log.Fatal(err)
}
```

[snip]:# (storage-2)
```go
// Read the object1 from bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
	log.Fatal(err)
}
defer rc.Close()
body, err := io.ReadAll(rc)
if err != nil {
	log.Fatal(err)
}
```
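For symmetry with the read snippet above, a minimal write sketch (not part of the vendored README; it assumes the same `client` and `ctx` as the snippets above, plus the `io` and `strings` imports):

```go
// Write "hello world" to object1 in bucket.
wc := client.Bucket("bucket").Object("object1").NewWriter(ctx)
if _, err := io.Copy(wc, strings.NewReader("hello world")); err != nil {
	log.Fatal(err)
}
// Close flushes the remaining data and surfaces any upload error.
if err := wc.Close(); err != nil {
	log.Fatal(err)
}
```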
356  vendor/cloud.google.com/go/storage/acl.go  generated vendored Normal file
@@ -0,0 +1,356 @@
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// ACLRole is the level of access to grant.
|
||||
type ACLRole string
|
||||
|
||||
const (
|
||||
RoleOwner ACLRole = "OWNER"
|
||||
RoleReader ACLRole = "READER"
|
||||
RoleWriter ACLRole = "WRITER"
|
||||
)
|
||||
|
||||
// ACLEntity refers to a user or group.
|
||||
// They are sometimes referred to as grantees.
|
||||
//
|
||||
// It could be in the form of:
|
||||
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
|
||||
// "domain-<domain>" and "project-team-<projectId>".
|
||||
//
|
||||
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
|
||||
type ACLEntity string
|
||||
|
||||
const (
|
||||
AllUsers ACLEntity = "allUsers"
|
||||
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
|
||||
)
|
||||
|
||||
// ACLRule represents a grant for a role to an entity (user, group or team) for a
|
||||
// Google Cloud Storage object or bucket.
|
||||
type ACLRule struct {
|
||||
Entity ACLEntity
|
||||
EntityID string
|
||||
Role ACLRole
|
||||
Domain string
|
||||
Email string
|
||||
ProjectTeam *ProjectTeam
|
||||
}
|
||||
|
||||
// ProjectTeam is the project team associated with the entity, if any.
|
||||
type ProjectTeam struct {
|
||||
ProjectNumber string
|
||||
Team string
|
||||
}
|
||||
|
||||
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
|
||||
// ACLHandle on an object operates on the latest generation of that object by default.
|
||||
// Selecting a specific generation of an object is not currently supported by the client.
|
||||
type ACLHandle struct {
|
||||
c *Client
|
||||
bucket string
|
||||
object string
|
||||
isDefault bool
|
||||
userProject string // for requester-pays buckets
|
||||
retry *retryConfig
|
||||
}
|
||||
|
||||
// Delete permanently deletes the ACL entry for the given entity.
|
||||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectDelete(ctx, entity)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.bucketDefaultDelete(ctx, entity)
|
||||
}
|
||||
return a.bucketDelete(ctx, entity)
|
||||
}
|
||||
|
||||
// Set sets the role for the given entity.
|
||||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectSet(ctx, entity, role, false)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.objectSet(ctx, entity, role, true)
|
||||
}
|
||||
return a.bucketSet(ctx, entity, role)
|
||||
}
|
||||
|
||||
// List retrieves ACL entries.
|
||||
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectList(ctx)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.bucketDefaultList(ctx)
|
||||
}
|
||||
return a.bucketList(ctx)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
if isBucketDefault {
|
||||
return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...)
|
||||
}
|
||||
return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
|
||||
vc := reflect.ValueOf(call)
|
||||
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
|
||||
if a.userProject != "" {
|
||||
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
}
|
||||
|
||||
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
|
||||
var rs []ACLRule
|
||||
for _, item := range items {
|
||||
rs = append(rs, toObjectACLRule(item))
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule {
|
||||
var rs []ACLRule
|
||||
for _, item := range items {
|
||||
rs = append(rs, toObjectACLRuleFromProto(item))
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
|
||||
var rs []ACLRule
|
||||
for _, item := range items {
|
||||
rs = append(rs, toBucketACLRule(item))
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule {
|
||||
var rs []ACLRule
|
||||
for _, item := range items {
|
||||
rs = append(rs, toBucketACLRuleFromProto(item))
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
|
||||
return ACLRule{
|
||||
Entity: ACLEntity(a.Entity),
|
||||
EntityID: a.EntityId,
|
||||
Role: ACLRole(a.Role),
|
||||
Domain: a.Domain,
|
||||
Email: a.Email,
|
||||
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
|
||||
}
|
||||
}
|
||||
|
||||
func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule {
|
||||
return ACLRule{
|
||||
Entity: ACLEntity(a.GetEntity()),
|
||||
EntityID: a.GetEntityId(),
|
||||
Role: ACLRole(a.GetRole()),
|
||||
Domain: a.GetDomain(),
|
||||
Email: a.GetEmail(),
|
||||
ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()),
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
|
||||
return ACLRule{
|
||||
Entity: ACLEntity(a.Entity),
|
||||
EntityID: a.EntityId,
|
||||
Role: ACLRole(a.Role),
|
||||
Domain: a.Domain,
|
||||
Email: a.Email,
|
||||
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule {
|
||||
return ACLRule{
|
||||
Entity: ACLEntity(a.GetEntity()),
|
||||
EntityID: a.GetEntityId(),
|
||||
Role: ACLRole(a.GetRole()),
|
||||
Domain: a.GetDomain(),
|
||||
Email: a.GetEmail(),
|
||||
ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()),
|
||||
}
|
||||
}
|
||||
|
||||
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
|
||||
if len(rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
r := make([]*raw.ObjectAccessControl, 0, len(rules))
|
||||
for _, rule := range rules {
|
||||
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl {
|
||||
if len(rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
r := make([]*storagepb.ObjectAccessControl, 0, len(rules))
|
||||
for _, rule := range rules {
|
||||
r = append(r, rule.toProtoObjectAccessControl("")) // bucket name unnecessary
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
|
||||
if len(rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
r := make([]*raw.BucketAccessControl, 0, len(rules))
|
||||
for _, rule := range rules {
|
||||
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl {
|
||||
if len(rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
r := make([]*storagepb.BucketAccessControl, 0, len(rules))
|
||||
for _, rule := range rules {
|
||||
r = append(r, rule.toProtoBucketAccessControl())
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
|
||||
return &raw.BucketAccessControl{
|
||||
Bucket: bucket,
|
||||
Entity: string(r.Entity),
|
||||
Role: string(r.Role),
|
||||
// The other fields are not settable.
|
||||
}
|
||||
}
|
||||
|
||||
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
|
||||
return &raw.ObjectAccessControl{
|
||||
Bucket: bucket,
|
||||
Entity: string(r.Entity),
|
||||
Role: string(r.Role),
|
||||
// The other fields are not settable.
|
||||
}
|
||||
}
|
||||
|
||||
func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl {
|
||||
return &storagepb.ObjectAccessControl{
|
||||
Entity: string(r.Entity),
|
||||
Role: string(r.Role),
|
||||
// The other fields are not settable.
|
||||
}
|
||||
}
|
||||
|
||||
func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl {
|
||||
return &storagepb.BucketAccessControl{
|
||||
Entity: string(r.Entity),
|
||||
Role: string(r.Role),
|
||||
// The other fields are not settable.
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
return &ProjectTeam{
|
||||
ProjectNumber: p.ProjectNumber,
|
||||
Team: p.Team,
|
||||
}
|
||||
}
|
||||
|
||||
func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
return &ProjectTeam{
|
||||
ProjectNumber: p.GetProjectNumber(),
|
||||
Team: p.GetTeam(),
|
||||
}
|
||||
}
|
||||
|
||||
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
return &ProjectTeam{
|
||||
ProjectNumber: p.ProjectNumber,
|
||||
Team: p.Team,
|
||||
}
|
||||
}
|
2165  vendor/cloud.google.com/go/storage/bucket.go  generated vendored Normal file
File diff suppressed because it is too large.
333  vendor/cloud.google.com/go/storage/client.go  generated vendored Normal file
@@ -0,0 +1,333 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"
	"io"
	"time"

	"cloud.google.com/go/iam/apiv1/iampb"
	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/option"
)

// TODO(noahdietz): Move existing factory methods to this file.

// storageClient is an internal-only interface designed to separate the
// transport-specific logic of making Storage API calls from the logic of the
// client library.
//
// Implementation requirements beyond implementing the interface include:
// * factory method(s) must accept a `userProject string` param
// * `settings` must be retained per instance
// * `storageOption`s must be resolved in the order they are received
// * all API errors must be wrapped in the gax-go APIError type
// * any unimplemented interface methods must return a StorageUnimplementedErr
//
// TODO(noahdietz): This interface is currently not used in the production code
// paths
type storageClient interface {

	// Top-level methods.

	GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
	CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
	ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
	Close() error

	// Bucket methods.

	DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
	GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
	UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
	LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
	ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator

	// Object metadata methods.

	DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
	GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
	UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)

	// Default Object ACL methods.

	DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
	ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
	UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error

	// Bucket ACL methods.

	DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
	ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
	UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error

	// Object ACL methods.

	DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error
	ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error)
	UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error

	// Media operations.
|
||||
|
||||
ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error)
|
||||
RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error)
|
||||
|
||||
NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error)
|
||||
OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error)
|
||||
|
||||
// IAM methods.
|
||||
|
||||
GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error)
|
||||
SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error
|
||||
TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error)
|
||||
|
||||
// HMAC Key methods.
|
||||
|
||||
GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error)
|
||||
ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator
|
||||
UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
|
||||
CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error)
|
||||
DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error
|
||||
|
||||
// Notification methods.
|
||||
ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
|
||||
CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
|
||||
DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
|
||||
}
|
||||
|
||||
// settings contains transport-agnostic configuration for API calls made via
|
||||
// the storageClient inteface. All implementations must utilize settings
|
||||
// and respect those that are applicable.
|
||||
type settings struct {
|
||||
// retry is the complete retry configuration to use when evaluating if an
|
||||
// API call should be retried.
|
||||
retry *retryConfig
|
||||
|
||||
// gax is a set of gax.CallOption to be conveyed to gax.Invoke.
|
||||
// Note: Not all storageClient interfaces will must use gax.Invoke.
|
||||
gax []gax.CallOption
|
||||
|
||||
// idempotent indicates if the call is idempotent or not when considering
|
||||
// if the call should be retired or not.
|
||||
idempotent bool
|
||||
|
||||
// clientOption is a set of option.ClientOption to be used during client
|
||||
// transport initialization. See https://pkg.go.dev/google.golang.org/api/option
|
||||
// for a list of supported options.
|
||||
clientOption []option.ClientOption
|
||||
|
||||
// userProject is the user project that should be billed for the request.
|
||||
userProject string
|
||||
}
|
||||
|
||||
func initSettings(opts ...storageOption) *settings {
|
||||
s := &settings{}
|
||||
resolveOptions(s, opts...)
|
||||
return s
|
||||
}
|
||||
|
||||
func resolveOptions(s *settings, opts ...storageOption) {
|
||||
for _, o := range opts {
|
||||
o.Apply(s)
|
||||
}
|
||||
}
|
||||
|
||||
// callSettings is a helper for resolving storage options against the settings
|
||||
// in the context of an individual call. This is to ensure that client-level
|
||||
// default settings are not mutated by two different calls getting options.
|
||||
//
|
||||
// Example: s := callSettings(c.settings, opts...)
|
||||
func callSettings(defaults *settings, opts ...storageOption) *settings {
|
||||
if defaults == nil {
|
||||
return nil
|
||||
}
|
||||
// This does not make a deep copy of the pointer/slice fields, but all
|
||||
// options replace the settings fields rather than modify their values in
|
||||
// place.
|
||||
cs := *defaults
|
||||
resolveOptions(&cs, opts...)
|
||||
return &cs
|
||||
}
|
||||
|
||||
// makeStorageOpts is a helper for generating a set of storageOption based on
|
||||
// idempotency, retryConfig, and userProject. All top-level client operations
|
||||
// will generally have to pass these options through the interface.
|
||||
func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption {
|
||||
opts := []storageOption{idempotent(isIdempotent)}
|
||||
if retry != nil {
|
||||
opts = append(opts, withRetryConfig(retry))
|
||||
}
|
||||
if userProject != "" {
|
||||
opts = append(opts, withUserProject(userProject))
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// storageOption is the transport-agnostic call option for the storageClient
|
||||
// interface.
|
||||
type storageOption interface {
|
||||
Apply(s *settings)
|
||||
}
|
||||
|
||||
func withGAXOptions(opts ...gax.CallOption) storageOption {
|
||||
return &gaxOption{opts}
|
||||
}
|
||||
|
||||
type gaxOption struct {
|
||||
opts []gax.CallOption
|
||||
}
|
||||
|
||||
func (o *gaxOption) Apply(s *settings) { s.gax = o.opts }
|
||||
|
||||
func withRetryConfig(rc *retryConfig) storageOption {
|
||||
return &retryOption{rc}
|
||||
}
|
||||
|
||||
type retryOption struct {
|
||||
rc *retryConfig
|
||||
}
|
||||
|
||||
func (o *retryOption) Apply(s *settings) { s.retry = o.rc }
|
||||
|
||||
func idempotent(i bool) storageOption {
|
||||
return &idempotentOption{i}
|
||||
}
|
||||
|
||||
type idempotentOption struct {
|
||||
idempotency bool
|
||||
}
|
||||
|
||||
func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency }
|
||||
|
||||
func withClientOptions(opts ...option.ClientOption) storageOption {
|
||||
return &clientOption{opts: opts}
|
||||
}
|
||||
|
||||
type clientOption struct {
|
||||
opts []option.ClientOption
|
||||
}
|
||||
|
||||
func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts }
|
||||
|
||||
func withUserProject(project string) storageOption {
|
||||
return &userProjectOption{project}
|
||||
}
|
||||
|
||||
type userProjectOption struct {
|
||||
project string
|
||||
}
|
||||
|
||||
func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project }
|
||||
|
||||
type openWriterParams struct {
|
||||
// Writer configuration
|
||||
|
||||
// ctx is the context used by the writer routine to make all network calls
|
||||
// and to manage the writer routine - see `Writer.ctx`.
|
||||
// Required.
|
||||
ctx context.Context
|
||||
// chunkSize - see `Writer.ChunkSize`.
|
||||
// Optional.
|
||||
chunkSize int
|
||||
// chunkRetryDeadline - see `Writer.ChunkRetryDeadline`.
|
||||
// Optional.
|
||||
chunkRetryDeadline time.Duration
|
||||
|
||||
// Object/request properties
|
||||
|
||||
// bucket - see `Writer.o.bucket`.
|
||||
// Required.
|
||||
bucket string
|
||||
// attrs - see `Writer.ObjectAttrs`.
|
||||
// Required.
|
||||
attrs *ObjectAttrs
|
||||
// conds - see `Writer.o.conds`.
|
||||
// Optional.
|
||||
conds *Conditions
|
||||
// encryptionKey - see `Writer.o.encryptionKey`
|
||||
// Optional.
|
||||
encryptionKey []byte
|
||||
// sendCRC32C - see `Writer.SendCRC32C`.
|
||||
// Optional.
|
||||
sendCRC32C bool
|
||||
|
||||
// Writer callbacks
|
||||
|
||||
// donec - see `Writer.donec`.
|
||||
// Required.
|
||||
donec chan struct{}
|
||||
// setError callback for reporting errors - see `Writer.error`.
|
||||
// Required.
|
||||
setError func(error)
|
||||
// progress callback for reporting upload progress - see `Writer.progress`.
|
||||
// Required.
|
||||
progress func(int64)
|
||||
// setObj callback for reporting the resulting object - see `Writer.obj`.
|
||||
// Required.
|
||||
setObj func(*ObjectAttrs)
|
||||
}
|
||||
|
||||
type newRangeReaderParams struct {
|
||||
bucket string
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
gen int64
|
||||
length int64
|
||||
object string
|
||||
offset int64
|
||||
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
|
||||
}
|
||||
|
||||
type composeObjectRequest struct {
|
||||
dstBucket string
|
||||
dstObject destinationObject
|
||||
srcs []sourceObject
|
||||
predefinedACL string
|
||||
sendCRC32C bool
|
||||
}
|
||||
|
||||
type sourceObject struct {
|
||||
name string
|
||||
bucket string
|
||||
gen int64
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
}
|
||||
|
||||
type destinationObject struct {
|
||||
name string
|
||||
bucket string
|
||||
conds *Conditions
|
||||
attrs *ObjectAttrs // attrs to set on the destination object.
|
||||
encryptionKey []byte
|
||||
keyName string
|
||||
}
|
||||
|
||||
type rewriteObjectRequest struct {
|
||||
srcObject sourceObject
|
||||
dstObject destinationObject
|
||||
predefinedACL string
|
||||
token string
|
||||
maxBytesRewrittenPerCall int64
|
||||
}
|
||||
|
||||
type rewriteObjectResponse struct {
|
||||
resource *ObjectAttrs
|
||||
done bool
|
||||
written int64
|
||||
size int64
|
||||
token string
|
||||
}
|
233
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
Normal file
233
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
Normal file
|
@ -0,0 +1,233 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
)
|
||||
|
||||
// CopierFrom creates a Copier that can copy src to dst.
|
||||
// You can immediately call Run on the returned Copier, or
|
||||
// you can configure it first.
|
||||
//
|
||||
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
|
||||
// in which case the user project of src is billed.
|
||||
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
|
||||
return &Copier{dst: dst, src: src}
|
||||
}
|
||||
|
||||
// A Copier copies a source object to a destination.
|
||||
type Copier struct {
|
||||
// ObjectAttrs are optional attributes to set on the destination object.
|
||||
// Any attributes must be initialized before any calls on the Copier. Nil
|
||||
// or zero-valued attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
// RewriteToken can be set before calling Run to resume a copy
|
||||
// operation. After Run returns a non-nil error, RewriteToken will
|
||||
// have been updated to contain the value needed to resume the copy.
|
||||
RewriteToken string
|
||||
|
||||
// ProgressFunc can be used to monitor the progress of a multi-RPC copy
|
||||
// operation. If ProgressFunc is not nil and copying requires multiple
|
||||
// calls to the underlying service (see
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
|
||||
// ProgressFunc will be invoked after each call with the number of bytes of
|
||||
// content copied so far and the total size in bytes of the source object.
|
||||
//
|
||||
// ProgressFunc is intended to make upload progress available to the
|
||||
// application. For example, the implementation of ProgressFunc may update
|
||||
// a progress bar in the application's UI, or log the result of
|
||||
// float64(copiedBytes)/float64(totalBytes).
|
||||
//
|
||||
// ProgressFunc should return quickly without blocking.
|
||||
ProgressFunc func(copiedBytes, totalBytes uint64)
|
||||
|
||||
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
|
||||
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
|
||||
// any.
|
||||
//
|
||||
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
|
||||
// (via ObjectHandle.Key) on the destination object will result in an error when
|
||||
// Run is called.
|
||||
DestinationKMSKeyName string
|
||||
|
||||
dst, src *ObjectHandle
|
||||
|
||||
// The maximum number of bytes that will be rewritten per rewrite request.
|
||||
// Most callers shouldn't need to specify this parameter - it is primarily
|
||||
// in place to support testing. If specified the value must be an integral
|
||||
// multiple of 1 MiB (1048576). Also, this only applies to requests where
|
||||
// the source and destination span locations and/or storage classes. Finally,
|
||||
// this value must not change across rewrite calls else you'll get an error
|
||||
// that the `rewriteToken` is invalid.
|
||||
maxBytesRewrittenPerCall int64
|
||||
}
|
||||
|
||||
// Run performs the copy.
|
||||
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if err := c.src.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
|
||||
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
|
||||
}
|
||||
if c.dst.gen != defaultGen {
|
||||
return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen)
|
||||
}
|
||||
// Convert destination attributes to raw form, omitting the bucket.
|
||||
// If the bucket is included but name or content-type aren't, the service
|
||||
// returns a 400 with "Required" as the only message. Omitting the bucket
|
||||
// does not cause any problems.
|
||||
req := &rewriteObjectRequest{
|
||||
srcObject: sourceObject{
|
||||
name: c.src.object,
|
||||
bucket: c.src.bucket,
|
||||
gen: c.src.gen,
|
||||
conds: c.src.conds,
|
||||
encryptionKey: c.src.encryptionKey,
|
||||
},
|
||||
dstObject: destinationObject{
|
||||
name: c.dst.object,
|
||||
bucket: c.dst.bucket,
|
||||
conds: c.dst.conds,
|
||||
attrs: &c.ObjectAttrs,
|
||||
encryptionKey: c.dst.encryptionKey,
|
||||
keyName: c.DestinationKMSKeyName,
|
||||
},
|
||||
predefinedACL: c.PredefinedACL,
|
||||
token: c.RewriteToken,
|
||||
maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall,
|
||||
}
|
||||
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
var userProject string
|
||||
if c.dst.userProject != "" {
|
||||
userProject = c.dst.userProject
|
||||
} else if c.src.userProject != "" {
|
||||
userProject = c.src.userProject
|
||||
}
|
||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject)
|
||||
|
||||
for {
|
||||
res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.token
|
||||
req.token = res.token
|
||||
if c.ProgressFunc != nil {
|
||||
c.ProgressFunc(uint64(res.written), uint64(res.size))
|
||||
}
|
||||
if res.done { // Finished successfully.
|
||||
return res.resource, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
||||
// You can immediately call Run on the returned Composer, or you can
|
||||
// configure it first.
|
||||
//
|
||||
// The encryption key for the destination object will be used to decrypt all
|
||||
// source objects and encrypt the destination object. It is an error
|
||||
// to specify an encryption key for any of the source objects.
|
||||
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
|
||||
return &Composer{dst: dst, srcs: srcs}
|
||||
}
|
||||
|
||||
// A Composer composes source objects into a destination object.
|
||||
//
|
||||
// For Requester Pays buckets, the user project of dst is billed.
|
||||
type Composer struct {
|
||||
// ObjectAttrs are optional attributes to set on the destination object.
|
||||
// Any attributes must be initialized before any calls on the Composer. Nil
|
||||
// or zero-valued attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
// SendCRC specifies whether to transmit a CRC32C field. It should be set
|
||||
// to true in addition to setting the Composer's CRC32C field, because zero
|
||||
// is a valid CRC and normally a zero would not be transmitted.
|
||||
// If a CRC32C is sent, and the data in the destination object does not match
|
||||
// the checksum, the compose will be rejected.
|
||||
SendCRC32C bool
|
||||
|
||||
dst *ObjectHandle
|
||||
srcs []*ObjectHandle
|
||||
}
|
||||
|
||||
// Run performs the compose operation.
|
||||
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.gen != defaultGen {
|
||||
return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen)
|
||||
}
|
||||
if len(c.srcs) == 0 {
|
||||
return nil, errors.New("storage: at least one source object must be specified")
|
||||
}
|
||||
|
||||
for _, src := range c.srcs {
|
||||
if err := src.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if src.bucket != c.dst.bucket {
|
||||
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
|
||||
}
|
||||
if src.encryptionKey != nil {
|
||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
||||
}
|
||||
}
|
||||
|
||||
req := &composeObjectRequest{
|
||||
dstBucket: c.dst.bucket,
|
||||
predefinedACL: c.PredefinedACL,
|
||||
sendCRC32C: c.SendCRC32C,
|
||||
}
|
||||
req.dstObject = destinationObject{
|
||||
name: c.dst.object,
|
||||
bucket: c.dst.bucket,
|
||||
conds: c.dst.conds,
|
||||
attrs: &c.ObjectAttrs,
|
||||
encryptionKey: c.dst.encryptionKey,
|
||||
}
|
||||
for _, src := range c.srcs {
|
||||
s := sourceObject{
|
||||
name: src.object,
|
||||
bucket: src.bucket,
|
||||
gen: src.gen,
|
||||
conds: src.conds,
|
||||
}
|
||||
req.srcs = append(req.srcs, s)
|
||||
}
|
||||
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject)
|
||||
return c.dst.c.tc.ComposeObject(ctx, req, opts...)
|
||||
}
|
331
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
Normal file
331
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,331 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package storage provides an easy way to work with Google Cloud Storage.
|
||||
Google Cloud Storage stores data in named objects, which are grouped into buckets.
|
||||
|
||||
More information about Google Cloud Storage is available at
|
||||
https://cloud.google.com/storage/docs.
|
||||
|
||||
See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
# Creating a Client
|
||||
|
||||
To start working with this package, create a [Client]:
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
The client will use your default application credentials. Clients should be
|
||||
reused instead of created as needed. The methods of [Client] are safe for
|
||||
concurrent use by multiple goroutines.
|
||||
|
||||
You may configure the client by passing in options from the [google.golang.org/api/option]
|
||||
package. You may also use options defined in this package, such as [WithJSONReads].
|
||||
|
||||
If you only wish to access public data, you can create
|
||||
an unauthenticated client with
|
||||
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
|
||||
To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST
|
||||
environment variable to the address at which your emulator is running. This will
|
||||
send requests to that address instead of to Cloud Storage. You can then create
|
||||
and use a client as usual:
|
||||
|
||||
// Set STORAGE_EMULATOR_HOST environment variable.
|
||||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Create client as usual.
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// This request is now directed to http://localhost:9000/storage/v1/b
|
||||
// instead of https://storage.googleapis.com/storage/v1/b
|
||||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Please note that there is no official emulator for Cloud Storage.
|
||||
|
||||
# Buckets
|
||||
|
||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
||||
bucket, make a bucket handle:
|
||||
|
||||
bkt := client.Bucket(bucketName)
|
||||
|
||||
A handle is a reference to a bucket. You can have a handle even if the
|
||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
||||
call [BucketHandle.Create]:
|
||||
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Note that although buckets are associated with projects, bucket names are
|
||||
global across all projects.
|
||||
|
||||
Each bucket has associated metadata, represented in this package by
|
||||
[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set
|
||||
the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use
|
||||
[BucketHandle.Attrs]:
|
||||
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
|
||||
# Objects
|
||||
|
||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
||||
refer to objects using a handle, just as with buckets, but unlike buckets
|
||||
you don't explicitly create an object. Instead, the first time you write
|
||||
to an object it will be created. You can use the standard Go [io.Reader]
|
||||
and [io.Writer] interfaces to read and write object data:
|
||||
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
|
||||
Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]:
|
||||
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
|
||||
# Listing objects
|
||||
|
||||
Listing objects in a bucket is done with the [BucketHandle.Objects] method:
|
||||
|
||||
query := &storage.Query{Prefix: ""}
|
||||
|
||||
var names []string
|
||||
it := bkt.Objects(ctx, query)
|
||||
for {
|
||||
attrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
names = append(names, attrs.Name)
|
||||
}
|
||||
|
||||
Objects are listed lexicographically by name. To filter objects
|
||||
lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used:
|
||||
|
||||
query := &storage.Query{
|
||||
Prefix: "",
|
||||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
||||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
||||
}
|
||||
|
||||
// ... as before
|
||||
|
||||
If only a subset of object attributes is needed when listing, specifying this
|
||||
subset using [Query.SetAttrSelection] may speed up the listing process:
|
||||
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query.SetAttrSelection([]string{"Name"})
|
||||
|
||||
// ... as before
|
||||
|
||||
# ACLs
|
||||
|
||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
||||
are suitable for fine-grained control, but you may prefer using IAM to control
|
||||
access at the project level (see [Cloud Storage IAM docs].
|
||||
|
||||
To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]:
|
||||
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
|
||||
You can also set and delete ACLs.
|
||||
|
||||
# Conditions
|
||||
|
||||
Every object has a generation and a metageneration. The generation changes
|
||||
whenever the content changes, and the metageneration changes whenever the
|
||||
metadata changes. [Conditions] let you check these values before an operation;
|
||||
the operation only executes if the conditions match. You can use conditions to
|
||||
prevent race conditions in read-modify-write operations.
|
||||
|
||||
For example, say you've read an object's metadata into objAttrs. Now
|
||||
you want to write to that object, but only if its contents haven't changed
|
||||
since you read it. Here is how to express that:
|
||||
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
|
||||
# Signed URLs
|
||||
|
||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
||||
Signing a URL requires credentials authorized to sign a URL. To use the same
|
||||
authentication that was used when instantiating the Storage client, use
|
||||
[BucketHandle.SignedURL].
|
||||
|
||||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
You can also sign a URL without creating a client. See the documentation of
|
||||
[SignedURL] for details.
|
||||
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
# Post Policy V4 Signed Request
|
||||
|
||||
A type of signed request that allows uploads through HTML forms directly to Cloud Storage with
|
||||
temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised
|
||||
by a user.
|
||||
|
||||
For more information, please see the [XML POST Object docs] as well
|
||||
as the documentation of [BucketHandle.GenerateSignedPostPolicyV4].
|
||||
|
||||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
|
||||
|
||||
# Credential requirements for signing
|
||||
|
||||
If the GoogleAccessID and PrivateKey option fields are not provided, they will
|
||||
be automatically detected by [BucketHandle.SignedURL] and
|
||||
[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true:
|
||||
- you are authenticated to the Storage Client with a service account's
|
||||
downloaded private key, either directly in code or by setting the
|
||||
GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]),
|
||||
- your application is running on Google Compute Engine (GCE), or
|
||||
- you are logged into [gcloud using application default credentials]
|
||||
with [impersonation enabled].
|
||||
|
||||
Detecting GoogleAccessID may not be possible if you are authenticated using a
|
||||
token source or using [option.WithHTTPClient]. In this case, you can provide a
|
||||
service account email for GoogleAccessID and the client will attempt to sign
|
||||
the URL or Post Policy using that service account.
|
||||
|
||||
To generate the signature, you must have:
|
||||
- iam.serviceAccounts.signBlob permissions on the GoogleAccessID service
|
||||
account, and
|
||||
- the [IAM Service Account Credentials API] enabled (unless authenticating
|
||||
with a downloaded private key).
|
||||
|
||||
# Errors
|
||||
|
||||
Errors returned by this client are often of the type [googleapi.Error].
|
||||
These errors can be introspected for more information by using [errors.As]
|
||||
with the richer [googleapi.Error] type. For example:
|
||||
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok {
|
||||
if e.Code == 409 { ... }
|
||||
}
|
||||
|
||||
# Retrying failed requests
|
||||
|
||||
Methods in this package may retry calls that fail with transient errors.
|
||||
Retrying continues indefinitely unless the controlling context is canceled, the
|
||||
client is closed, or a non-transient error is received. To stop retries from
|
||||
continuing, use context timeouts or cancellation.
|
||||
|
||||
The retry strategy in this library follows best practices for Cloud Storage. By
|
||||
default, operations are retried only if they are idempotent, and exponential
|
||||
backoff with jitter is employed. In addition, errors are only retried if they
|
||||
are defined as transient by the service. See the [Cloud Storage retry docs]
|
||||
for more information.
|
||||
|
||||
Users can configure non-default retry behavior for a single library call (using
|
||||
[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a
|
||||
client (using [Client.SetRetry]). For example:
|
||||
|
||||
o := client.Bucket(bucket).Object(object).Retryer(
|
||||
// Use WithBackoff to change the timing of the exponential backoff.
|
||||
storage.WithBackoff(gax.Backoff{
|
||||
Initial: 2 * time.Second,
|
||||
}),
|
||||
// Use WithPolicy to configure the idempotency policy. RetryAlways will
|
||||
// retry the operation even if it is non-idempotent.
|
||||
storage.WithPolicy(storage.RetryAlways),
|
||||
)
|
||||
|
||||
// Use a context timeout to set an overall deadline on the call, including all
|
||||
// potential retries.
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Delete an object using the specified strategy and timeout.
|
||||
if err := o.Delete(ctx); err != nil {
|
||||
// Handle err.
|
||||
}
|
||||
|
||||
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
|
||||
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
|
||||
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
|
||||
[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth
|
||||
[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
|
||||
[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
|
||||
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
|
||||
*/
|
||||
package storage // import "cloud.google.com/go/storage"
|
92
vendor/cloud.google.com/go/storage/emulator_test.sh
generated
vendored
Normal file
92
vendor/cloud.google.com/go/storage/emulator_test.sh
generated
vendored
Normal file
|
@ -0,0 +1,92 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2021 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License..
|
||||
|
||||
# Fail on any error
|
||||
set -eo pipefail
|
||||
|
||||
# Display commands being run
|
||||
set -x
|
||||
|
||||
# Only run on Go 1.17+
|
||||
min_minor_ver=17
|
||||
|
||||
v=`go version | { read _ _ v _; echo ${v#go}; }`
|
||||
comps=(${v//./ })
|
||||
minor_ver=${comps[1]}
|
||||
|
||||
if [ "$minor_ver" -lt "$min_minor_ver" ]; then
|
||||
echo minor version $minor_ver, skipping
|
||||
exit 0
|
||||
fi
|
||||
|
||||
export STORAGE_EMULATOR_HOST="http://localhost:9000"
|
||||
export STORAGE_EMULATOR_HOST_GRPC="localhost:8888"
|
||||
|
||||
DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench'
|
||||
DEFAULT_IMAGE_TAG='latest'
|
||||
DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG}
|
||||
CONTAINER_NAME=storage_testbench
|
||||
|
||||
# Note: --net=host makes the container bind directly to the Docker host’s network,
|
||||
# with no network isolation. If we were to use port-mapping instead, reset connection errors
|
||||
# would be captured differently and cause unexpected test behaviour.
|
||||
# The host networking driver works only on Linux hosts.
|
||||
# See more about using host networking: https://docs.docker.com/network/host/
|
||||
DOCKER_NETWORK="--net=host"
|
||||
# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to
|
||||
# differences in the network errors emitted by the system.
|
||||
if [ `go env GOOS` == 'darwin' ]; then
|
||||
DOCKER_NETWORK="-p 9000:9000 -p 8888:8888"
|
||||
fi
|
||||
|
||||
# Get the docker image for the testbench
|
||||
docker pull $DOCKER_IMAGE
|
||||
|
||||
# Start the testbench
|
||||
|
||||
docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE
|
||||
echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST"
|
||||
sleep 1
|
||||
|
||||
# Stop the testbench & cleanup environment variables
|
||||
function cleanup() {
|
||||
echo "Cleanup testbench"
|
||||
docker stop $CONTAINER_NAME
|
||||
unset STORAGE_EMULATOR_HOST;
|
||||
unset STORAGE_EMULATOR_HOST_GRPC;
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Check that the server is running - retry several times to allow for start-up time
|
||||
response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null)
|
||||
|
||||
if [[ $response != 200 ]]
|
||||
then
|
||||
echo "Testbench server did not start correctly"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start the gRPC server on port 8888.
|
||||
echo "Starting the gRPC server on port 8888"
|
||||
response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888")
|
||||
|
||||
if [[ $response != 200 ]]
|
||||
then
|
||||
echo "Testbench gRPC server did not start correctly"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Run tests
|
||||
go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log
|
1749
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
Normal file
1749
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
392
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
Normal file
392
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
Normal file
|
@ -0,0 +1,392 @@
|
|||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"google.golang.org/api/iterator"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// HMACState is the state of the HMAC key.
|
||||
//
|
||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||
type HMACState string
|
||||
|
||||
const (
|
||||
// Active is the status for an active key that can be used to sign
|
||||
// requests.
|
||||
Active HMACState = "ACTIVE"
|
||||
|
||||
// Inactive is the status for an inactive key thus requests signed by
|
||||
// this key will be denied.
|
||||
Inactive HMACState = "INACTIVE"
|
||||
|
||||
// Deleted is the status for a key that is deleted.
|
||||
// Once in this state the key cannot key cannot be recovered
|
||||
// and does not count towards key limits. Deleted keys will be cleaned
|
||||
// up later.
|
||||
Deleted HMACState = "DELETED"
|
||||
)
|
||||
|
||||
// HMACKey is the representation of a Google Cloud Storage HMAC key.
|
||||
//
|
||||
// HMAC keys are used to authenticate signed access to objects. To enable HMAC key
|
||||
// authentication, please visit https://cloud.google.com/storage/docs/migrating.
|
||||
//
|
||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||
type HMACKey struct {
|
||||
// The HMAC's secret key.
|
||||
Secret string
|
||||
|
||||
// AccessID is the ID of the HMAC key.
|
||||
AccessID string
|
||||
|
||||
// Etag is the HTTP/1.1 Entity tag.
|
||||
Etag string
|
||||
|
||||
// ID is the ID of the HMAC key, including the ProjectID and AccessID.
|
||||
ID string
|
||||
|
||||
// ProjectID is the ID of the project that owns the
|
||||
// service account to which the key authenticates.
|
||||
ProjectID string
|
||||
|
||||
// ServiceAccountEmail is the email address
|
||||
// of the key's associated service account.
|
||||
ServiceAccountEmail string
|
||||
|
||||
// CreatedTime is the creation time of the HMAC key.
|
||||
CreatedTime time.Time
|
||||
|
||||
// UpdatedTime is the last modification time of the HMAC key metadata.
|
||||
UpdatedTime time.Time
|
||||
|
||||
// State is the state of the HMAC key.
|
||||
// It can be one of StateActive, StateInactive or StateDeleted.
|
||||
State HMACState
|
||||
}
|
||||
|
||||
// HMACKeyHandle helps provide access and management for HMAC keys.
|
||||
//
|
||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||
type HMACKeyHandle struct {
|
||||
projectID string
|
||||
accessID string
|
||||
retry *retryConfig
|
||||
tc storageClient
|
||||
}
|
||||
|
||||
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
||||
return &HMACKeyHandle{
|
||||
projectID: projectID,
|
||||
accessID: accessID,
|
||||
retry: c.retry,
|
||||
tc: c.tc,
|
||||
}
|
||||
}
|
||||
|
||||
// Get invokes an RPC to retrieve the HMAC key referenced by the
|
||||
// HMACKeyHandle's accessID.
|
||||
//
|
||||
// Options such as UserProjectForHMACKeys can be used to set the
|
||||
// userProject to be billed against for operations.
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
|
||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
||||
hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
||||
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
|
||||
// Only inactive HMAC keys can be deleted.
|
||||
// After deletion, a key cannot be used to authenticate requests.
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
|
||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
||||
return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
||||
}
|
||||
|
||||
func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
||||
hkmd := hk.Metadata
|
||||
if hkmd == nil {
|
||||
return nil, errors.New("field Metadata cannot be nil")
|
||||
}
|
||||
createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("field CreatedTime: %w", err)
|
||||
}
|
||||
updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated)
|
||||
if err != nil && !updatedTimeCanBeNil {
|
||||
return nil, fmt.Errorf("field UpdatedTime: %w", err)
|
||||
}
|
||||
|
||||
hmKey := &HMACKey{
|
||||
AccessID: hkmd.AccessId,
|
||||
Secret: hk.Secret,
|
||||
Etag: hkmd.Etag,
|
||||
ID: hkmd.Id,
|
||||
State: HMACState(hkmd.State),
|
||||
ProjectID: hkmd.ProjectId,
|
||||
CreatedTime: createdTime,
|
||||
UpdatedTime: updatedTime,
|
||||
|
||||
ServiceAccountEmail: hkmd.ServiceAccountEmail,
|
||||
}
|
||||
|
||||
return hmKey, nil
|
||||
}
|
||||
|
||||
func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
|
||||
if pbmd == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &HMACKey{
|
||||
AccessID: pbmd.GetAccessId(),
|
||||
ID: pbmd.GetId(),
|
||||
State: HMACState(pbmd.GetState()),
|
||||
ProjectID: pbmd.GetProject(),
|
||||
CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
|
||||
UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
|
||||
ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
|
||||
}
|
||||
}
|
||||
|
||||
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
if projectID == "" {
|
||||
return nil, errors.New("storage: expecting a non-blank projectID")
|
||||
}
|
||||
if serviceAccountEmail == "" {
|
||||
return nil, errors.New("storage: expecting a non-blank service account email")
|
||||
}
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
|
||||
o := makeStorageOpts(false, c.retry, desc.userProjectID)
|
||||
hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
|
||||
//
|
||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||
type HMACKeyAttrsToUpdate struct {
|
||||
// State is required and must be either StateActive or StateInactive.
|
||||
State HMACState
|
||||
|
||||
// Etag is an optional field and it is the HTTP/1.1 Entity tag.
|
||||
Etag string
|
||||
}
|
||||
|
||||
// Update mutates the HMACKey referred to by accessID.
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
if au.State != Active && au.State != Inactive {
|
||||
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
|
||||
}
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
|
||||
isIdempotent := len(au.Etag) > 0
|
||||
o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID)
|
||||
hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// An HMACKeysIterator is an iterator over HMACKeys.
|
||||
//
|
||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
//
|
||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
||||
type HMACKeysIterator struct {
|
||||
ctx context.Context
|
||||
raw *raw.ProjectsHmacKeysService
|
||||
projectID string
|
||||
hmacKeys []*HMACKey
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
index int
|
||||
desc hmacKeyDesc
|
||||
retry *retryConfig
|
||||
}
|
||||
|
||||
// ListHMACKeys returns an iterator for listing HMACKeys.
|
||||
//
|
||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
|
||||
o := makeStorageOpts(true, c.retry, desc.userProjectID)
|
||||
return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...)
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if
|
||||
// there are no more results. Once Next returns iterator.Done, all subsequent
|
||||
// calls will return iterator.Done.
|
||||
//
|
||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (it *HMACKeysIterator) Next() (*HMACKey, error) {
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key := it.hmacKeys[it.index]
|
||||
it.index++
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
//
|
||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
||||
// TODO: Remove fetch method upon integration. This method is internalized into
|
||||
// httpStorageClient.ListHMACKeys() as it is the only caller.
|
||||
call := it.raw.List(it.projectID)
|
||||
setClientHeader(call.Header())
|
||||
if pageToken != "" {
|
||||
call = call.PageToken(pageToken)
|
||||
}
|
||||
if it.desc.showDeletedKeys {
|
||||
call = call.ShowDeletedKeys(true)
|
||||
}
|
||||
if it.desc.userProjectID != "" {
|
||||
call = call.UserProject(it.desc.userProjectID)
|
||||
}
|
||||
if it.desc.forServiceAccountEmail != "" {
|
||||
call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail)
|
||||
}
|
||||
if pageSize > 0 {
|
||||
call = call.MaxResults(int64(pageSize))
|
||||
}
|
||||
|
||||
ctx := it.ctx
|
||||
var resp *raw.HmacKeysMetadata
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, it.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, metadata := range resp.Items {
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
hkey, err := toHMACKeyFromRaw(hk, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.hmacKeys = append(it.hmacKeys, hkey)
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
type hmacKeyDesc struct {
|
||||
forServiceAccountEmail string
|
||||
showDeletedKeys bool
|
||||
userProjectID string
|
||||
}
|
||||
|
||||
// HMACKeyOption configures the behavior of HMACKey related methods and actions.
|
||||
//
|
||||
// This interface is EXPERIMENTAL and subject to change or removal without notice.
|
||||
type HMACKeyOption interface {
|
||||
withHMACKeyDesc(*hmacKeyDesc)
|
||||
}
|
||||
|
||||
type hmacKeyDescFunc func(*hmacKeyDesc)
|
||||
|
||||
func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) {
|
||||
hkdf(hkd)
|
||||
}
|
||||
|
||||
// ForHMACKeyServiceAccountEmail returns HMAC Keys that are
|
||||
// associated with the email address of a service account in the project.
|
||||
//
|
||||
// Only one service account email can be used as a filter, so if multiple
|
||||
// of these options are applied, the last email to be set will be used.
|
||||
//
|
||||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption {
|
||||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
|
||||
hkd.forServiceAccountEmail = serviceAccountEmail
|
||||
})
|
||||
}
|
||||
|
||||
// ShowDeletedHMACKeys will also list keys whose state is "DELETED".
|
||||
//
|
||||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func ShowDeletedHMACKeys() HMACKeyOption {
|
||||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
|
||||
hkd.showDeletedKeys = true
|
||||
})
|
||||
}
|
||||
|
||||
// UserProjectForHMACKeys will bill the request against userProjectID
|
||||
// if userProjectID is non-empty.
|
||||
//
|
||||
// Note: This is a noop right now and only provided for API compatibility.
|
||||
//
|
||||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func UserProjectForHMACKeys(userProjectID string) HMACKeyOption {
|
||||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
|
||||
hkd.userProjectID = userProjectID
|
||||
})
|
||||
}
|
1418
vendor/cloud.google.com/go/storage/http_client.go
generated
vendored
Normal file
1418
vendor/cloud.google.com/go/storage/http_client.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
vendor/cloud.google.com/go/storage/iam.go (133 lines, generated, vendored, new file)
@@ -0,0 +1,133 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"

	"cloud.google.com/go/iam"
	"cloud.google.com/go/iam/apiv1/iampb"
	"cloud.google.com/go/internal/trace"
	raw "google.golang.org/api/storage/v1"
	"google.golang.org/genproto/googleapis/type/expr"
)

// IAM provides access to IAM access control for the bucket.
func (b *BucketHandle) IAM() *iam.Handle {
	return iam.InternalNewHandleClient(&iamClient{
		userProject: b.userProject,
		retry:       b.retry,
		client:      b.c,
	}, b.name)
}

// iamClient implements the iam.client interface.
type iamClient struct {
	userProject string
	retry       *retryConfig
	client      *Client
}

func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
	return c.GetWithVersion(ctx, resource, 1)
}

func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
	defer func() { trace.EndSpan(ctx, err) }()

	o := makeStorageOpts(true, c.retry, c.userProject)
	return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...)
}

func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
	defer func() { trace.EndSpan(ctx, err) }()

	isIdempotent := len(p.Etag) > 0
	o := makeStorageOpts(isIdempotent, c.retry, c.userProject)
	return c.client.tc.SetIamPolicy(ctx, resource, p, o...)
}

func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
	defer func() { trace.EndSpan(ctx, err) }()

	o := makeStorageOpts(true, c.retry, c.userProject)
	return c.client.tc.TestIamPermissions(ctx, resource, perms, o...)
}

func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
	return &raw.Policy{
		Bindings: iamToStorageBindings(ip.Bindings),
		Etag:     string(ip.Etag),
		Version:  int64(ip.Version),
	}
}

func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
	var rbs []*raw.PolicyBindings
	for _, ib := range ibs {
		rbs = append(rbs, &raw.PolicyBindings{
			Role:      ib.Role,
			Members:   ib.Members,
			Condition: iamToStorageCondition(ib.Condition),
		})
	}
	return rbs
}

func iamToStorageCondition(exprpb *expr.Expr) *raw.Expr {
	if exprpb == nil {
		return nil
	}
	return &raw.Expr{
		Expression:  exprpb.Expression,
		Description: exprpb.Description,
		Location:    exprpb.Location,
		Title:       exprpb.Title,
	}
}

func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
	return &iampb.Policy{
		Bindings: iamFromStorageBindings(rp.Bindings),
		Etag:     []byte(rp.Etag),
	}
}

func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
	var ibs []*iampb.Binding
	for _, rb := range rbs {
		ibs = append(ibs, &iampb.Binding{
			Role:      rb.Role,
			Members:   rb.Members,
			Condition: iamFromStorageCondition(rb.Condition),
		})
	}
	return ibs
}

func iamFromStorageCondition(rawexpr *raw.Expr) *expr.Expr {
	if rawexpr == nil {
		return nil
	}
	return &expr.Expr{
		Expression:  rawexpr.Expression,
		Description: rawexpr.Description,
		Location:    rawexpr.Location,
		Title:       rawexpr.Title,
	}
}
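Editor's usage sketch (not part of the vendored diff): BucketHandle.IAM above returns an *iam.Handle backed by iamClient, which is how callers read and update a bucket policy. The bucket name, group email, and role below are placeholders.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Fetch the current policy, add a binding, and write it back.
	handle := client.Bucket("my-bucket").IAM()
	policy, err := handle.Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	policy.Add("group:readers@example.com", "roles/storage.objectViewer")
	if err := handle.SetPolicy(ctx, policy); err != nil {
		log.Fatal(err)
	}
}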
vendor/cloud.google.com/go/storage/internal/apiv2/doc.go (176 lines, generated, vendored, new file)
@@ -0,0 +1,176 @@
// Copyright 2023 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
// Package storage is an auto-generated package for the
|
||||
// Cloud Storage API.
|
||||
//
|
||||
// Lets you store and retrieve potentially-large, immutable data objects.
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// # General documentation
|
||||
//
|
||||
// For information about setting deadlines, reusing contexts, and more
|
||||
// please visit https://pkg.go.dev/cloud.google.com/go.
|
||||
//
|
||||
// # Example usage
|
||||
//
|
||||
// To get started with this package, create a client.
|
||||
//
|
||||
// ctx := context.Background()
|
||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
||||
// // It will require modifications to work:
|
||||
// // - It may require correct/in-range values for request initialization.
|
||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// The client will use your default application credentials. Clients should be reused instead of created as needed.
|
||||
// The methods of Client are safe for concurrent use by multiple goroutines.
|
||||
// The returned client must be Closed when it is done being used.
|
||||
//
|
||||
// # Using the Client
|
||||
//
|
||||
// The following is an example of making an API call with the newly created client.
|
||||
//
|
||||
// ctx := context.Background()
|
||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
||||
// // It will require modifications to work:
|
||||
// // - It may require correct/in-range values for request initialization.
|
||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// req := &storagepb.DeleteBucketRequest{
|
||||
// // TODO: Fill request struct fields.
|
||||
// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest.
|
||||
// }
|
||||
// err = c.DeleteBucket(ctx, req)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
//
|
||||
// # Use of Context
|
||||
//
|
||||
// The ctx passed to NewClient is used for authentication requests and
|
||||
// for creating the underlying connection, but is not used for subsequent calls.
|
||||
// Individual methods on the client use the ctx given to them.
|
||||
//
|
||||
// To close the open connection, use the Close() method.
|
||||
package storage // import "cloud.google.com/go/storage/internal/apiv2"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// For more information on implementing a client constructor hook, see
|
||||
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
|
||||
type clientHookParams struct{}
|
||||
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|
||||
|
||||
var versionClient string
|
||||
|
||||
func getVersionClient() string {
|
||||
if versionClient == "" {
|
||||
return "UNKNOWN"
|
||||
}
|
||||
return versionClient
|
||||
}
|
||||
|
||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
out, _ := metadata.FromOutgoingContext(ctx)
|
||||
out = out.Copy()
|
||||
for _, md := range mds {
|
||||
for k, v := range md {
|
||||
out[k] = append(out[k], v...)
|
||||
}
|
||||
}
|
||||
return metadata.NewOutgoingContext(ctx, out)
|
||||
}
|
||||
|
||||
func checkDisableDeadlines() (bool, error) {
|
||||
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
b, err := strconv.ParseBool(raw)
|
||||
return b, err
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
||||
func DefaultAuthScopes() []string {
|
||||
return []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write",
|
||||
}
|
||||
}
|
||||
|
||||
// versionGo returns the Go runtime version. The returned string
|
||||
// has no whitespace, suitable for reporting in header.
|
||||
func versionGo() string {
|
||||
const develPrefix = "devel +"
|
||||
|
||||
s := runtime.Version()
|
||||
if strings.HasPrefix(s, develPrefix) {
|
||||
s = s[len(develPrefix):]
|
||||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
|
||||
s = s[:p]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
notSemverRune := func(r rune) bool {
|
||||
return !strings.ContainsRune("0123456789.", r)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "go1") {
|
||||
s = s[2:]
|
||||
var prerelease string
|
||||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
|
||||
s, prerelease = s[:p], s[p:]
|
||||
}
|
||||
if strings.HasSuffix(s, ".") {
|
||||
s += "0"
|
||||
} else if strings.Count(s, ".") < 2 {
|
||||
s += ".0"
|
||||
}
|
||||
if prerelease != "" {
|
||||
s += "-" + prerelease
|
||||
}
|
||||
return s
|
||||
}
|
||||
return "UNKNOWN"
|
||||
}
|
vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json (168 lines, generated, vendored, new file)
@@ -0,0 +1,168 @@
{
|
||||
"schema": "1.0",
|
||||
"comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
|
||||
"language": "go",
|
||||
"protoPackage": "google.storage.v2",
|
||||
"libraryPackage": "cloud.google.com/go/storage/internal/apiv2",
|
||||
"services": {
|
||||
"Storage": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "Client",
|
||||
"rpcs": {
|
||||
"CancelResumableWrite": {
|
||||
"methods": [
|
||||
"CancelResumableWrite"
|
||||
]
|
||||
},
|
||||
"ComposeObject": {
|
||||
"methods": [
|
||||
"ComposeObject"
|
||||
]
|
||||
},
|
||||
"CreateBucket": {
|
||||
"methods": [
|
||||
"CreateBucket"
|
||||
]
|
||||
},
|
||||
"CreateHmacKey": {
|
||||
"methods": [
|
||||
"CreateHmacKey"
|
||||
]
|
||||
},
|
||||
"CreateNotificationConfig": {
|
||||
"methods": [
|
||||
"CreateNotificationConfig"
|
||||
]
|
||||
},
|
||||
"DeleteBucket": {
|
||||
"methods": [
|
||||
"DeleteBucket"
|
||||
]
|
||||
},
|
||||
"DeleteHmacKey": {
|
||||
"methods": [
|
||||
"DeleteHmacKey"
|
||||
]
|
||||
},
|
||||
"DeleteNotificationConfig": {
|
||||
"methods": [
|
||||
"DeleteNotificationConfig"
|
||||
]
|
||||
},
|
||||
"DeleteObject": {
|
||||
"methods": [
|
||||
"DeleteObject"
|
||||
]
|
||||
},
|
||||
"GetBucket": {
|
||||
"methods": [
|
||||
"GetBucket"
|
||||
]
|
||||
},
|
||||
"GetHmacKey": {
|
||||
"methods": [
|
||||
"GetHmacKey"
|
||||
]
|
||||
},
|
||||
"GetIamPolicy": {
|
||||
"methods": [
|
||||
"GetIamPolicy"
|
||||
]
|
||||
},
|
||||
"GetNotificationConfig": {
|
||||
"methods": [
|
||||
"GetNotificationConfig"
|
||||
]
|
||||
},
|
||||
"GetObject": {
|
||||
"methods": [
|
||||
"GetObject"
|
||||
]
|
||||
},
|
||||
"GetServiceAccount": {
|
||||
"methods": [
|
||||
"GetServiceAccount"
|
||||
]
|
||||
},
|
||||
"ListBuckets": {
|
||||
"methods": [
|
||||
"ListBuckets"
|
||||
]
|
||||
},
|
||||
"ListHmacKeys": {
|
||||
"methods": [
|
||||
"ListHmacKeys"
|
||||
]
|
||||
},
|
||||
"ListNotificationConfigs": {
|
||||
"methods": [
|
||||
"ListNotificationConfigs"
|
||||
]
|
||||
},
|
||||
"ListObjects": {
|
||||
"methods": [
|
||||
"ListObjects"
|
||||
]
|
||||
},
|
||||
"LockBucketRetentionPolicy": {
|
||||
"methods": [
|
||||
"LockBucketRetentionPolicy"
|
||||
]
|
||||
},
|
||||
"QueryWriteStatus": {
|
||||
"methods": [
|
||||
"QueryWriteStatus"
|
||||
]
|
||||
},
|
||||
"ReadObject": {
|
||||
"methods": [
|
||||
"ReadObject"
|
||||
]
|
||||
},
|
||||
"RewriteObject": {
|
||||
"methods": [
|
||||
"RewriteObject"
|
||||
]
|
||||
},
|
||||
"SetIamPolicy": {
|
||||
"methods": [
|
||||
"SetIamPolicy"
|
||||
]
|
||||
},
|
||||
"StartResumableWrite": {
|
||||
"methods": [
|
||||
"StartResumableWrite"
|
||||
]
|
||||
},
|
||||
"TestIamPermissions": {
|
||||
"methods": [
|
||||
"TestIamPermissions"
|
||||
]
|
||||
},
|
||||
"UpdateBucket": {
|
||||
"methods": [
|
||||
"UpdateBucket"
|
||||
]
|
||||
},
|
||||
"UpdateHmacKey": {
|
||||
"methods": [
|
||||
"UpdateHmacKey"
|
||||
]
|
||||
},
|
||||
"UpdateObject": {
|
||||
"methods": [
|
||||
"UpdateObject"
|
||||
]
|
||||
},
|
||||
"WriteObject": {
|
||||
"methods": [
|
||||
"WriteObject"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go (26 lines, generated, vendored, new file)
@@ -0,0 +1,26 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// InsertMetadata inserts the given gRPC metadata into the outgoing context.
func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	return insertMetadata(ctx, mds...)
}
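Editor's sketch (not part of the vendored diff): because this apiv2 package is internal, InsertMetadata can only be called from elsewhere in the module. The helper merges the given pairs into any metadata already on the outgoing context. The header name and value below are placeholders.

package storage

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// exampleAttachRoutingHeader shows how a caller inside this module might
// attach per-request routing metadata before issuing a gRPC request.
func exampleAttachRoutingHeader(ctx context.Context) context.Context {
	md := metadata.Pairs("x-goog-request-params", "bucket=projects/_/buckets/my-bucket")
	return InsertMetadata(ctx, md)
}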
vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go (1619 lines, generated, vendored, new file): diff suppressed because it is too large
vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go (10759 lines, generated, vendored, new file): diff suppressed because it is too large
vendor/cloud.google.com/go/storage/internal/apiv2/version.go (23 lines, generated, vendored, new file)
@@ -0,0 +1,23 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapicgen. DO NOT EDIT.

package storage

import "cloud.google.com/go/storage/internal"

func init() {
	versionClient = internal.Version
}
vendor/cloud.google.com/go/storage/internal/version.go (18 lines, generated, vendored, new file)
@@ -0,0 +1,18 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

// Version is the current tagged release of the library.
const Version = "1.30.1"
vendor/cloud.google.com/go/storage/invoke.go (146 lines, generated, vendored, new file)
@@ -0,0 +1,146 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"strings"

	"cloud.google.com/go/internal"
	"cloud.google.com/go/internal/version"
	sinternal "cloud.google.com/go/storage/internal"
	"github.com/google/uuid"
	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/googleapi"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var defaultRetry *retryConfig = &retryConfig{}
var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version)

// run determines whether a retry is necessary based on the config and
// idempotency information. It then calls the function with or without retries
// as appropriate, using the configured settings.
func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error {
	attempts := 1
	invocationID := uuid.New().String()

	if retry == nil {
		retry = defaultRetry
	}
	if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever {
		setHeader(invocationID, attempts)
		return call()
	}
	bo := gax.Backoff{}
	if retry.backoff != nil {
		bo.Multiplier = retry.backoff.Multiplier
		bo.Initial = retry.backoff.Initial
		bo.Max = retry.backoff.Max
	}
	var errorFunc func(err error) bool = ShouldRetry
	if retry.shouldRetry != nil {
		errorFunc = retry.shouldRetry
	}

	return internal.Retry(ctx, bo, func() (stop bool, err error) {
		setHeader(invocationID, attempts)
		err = call()
		attempts++
		return !errorFunc(err), err
	})
}

func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) {
	return func(invocationID string, attempts int) {
		if req == nil {
			return
		}
		header := req.Header()
		invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts)
		xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ")
		header.Set("x-goog-api-client", xGoogHeader)
	}
}

// TODO: Implement method setting header via context for gRPC
func setRetryHeaderGRPC(_ context.Context) func(string, int) {
	return func(_ string, _ int) {
		return
	}
}

// ShouldRetry returns true if an error is retryable, based on best practice
// guidance from GCS. See
// https://cloud.google.com/storage/docs/retry-strategy#go for more information
// on what errors are considered retryable.
//
// If you would like to customize retryable errors, use the WithErrorFunc to
// supply a RetryOption to your library calls. For example, to retry additional
// errors, you can write a custom func that wraps ShouldRetry and also specifies
// additional errors that should return true.
func ShouldRetry(err error) bool {
	if err == nil {
		return false
	}
	if errors.Is(err, io.ErrUnexpectedEOF) {
		return true
	}

	switch e := err.(type) {
	case *net.OpError:
		if strings.Contains(e.Error(), "use of closed network connection") {
			// TODO: check against net.ErrClosed (go 1.16+) instead of string
			return true
		}
	case *googleapi.Error:
		// Retry on 408, 429, and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case *url.Error:
		// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
		// Unfortunately the error type is unexported, so we resort to string
		// matching.
		retriable := []string{"connection refused", "connection reset"}
		for _, s := range retriable {
			if strings.Contains(e.Error(), s) {
				return true
			}
		}
	case interface{ Temporary() bool }:
		if e.Temporary() {
			return true
		}
	}
	// HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per
	// https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html.
	//
	// This is only necessary for the experimental gRPC-based media operations.
	if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable {
		return true
	}
	// Unwrap is only supported in go1.13.x+
	if e, ok := err.(interface{ Unwrap() error }); ok {
		return ShouldRetry(e.Unwrap())
	}
	return false
}
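Editor's usage sketch (not part of the vendored diff): the ShouldRetry doc above suggests wrapping it via WithErrorFunc to retry additional errors. The bucket and object names and the errFlaky sentinel below are placeholders.

package main

import (
	"context"
	"errors"
	"log"

	"cloud.google.com/go/storage"
)

// errFlaky is a hypothetical application-defined error worth retrying.
var errFlaky = errors.New("placeholder: transient backend error")

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Keep the library's default retryable set and add one more condition.
	obj := client.Bucket("my-bucket").Object("my-object").Retryer(
		storage.WithErrorFunc(func(err error) bool {
			return storage.ShouldRetry(err) || errors.Is(err, errFlaky)
		}),
		storage.WithPolicy(storage.RetryAlways),
	)
	if _, err := obj.Attrs(ctx); err != nil {
		log.Fatal(err)
	}
}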
vendor/cloud.google.com/go/storage/notifications.go (200 lines, generated, vendored, new file)
@@ -0,0 +1,200 @@
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// A Notification describes how to send Cloud PubSub messages when certain
|
||||
// events occur in a bucket.
|
||||
type Notification struct {
|
||||
//The ID of the notification.
|
||||
ID string
|
||||
|
||||
// The ID of the topic to which this subscription publishes.
|
||||
TopicID string
|
||||
|
||||
// The ID of the project to which the topic belongs.
|
||||
TopicProjectID string
|
||||
|
||||
// Only send notifications about listed event types. If empty, send notifications
|
||||
// for all event types.
|
||||
// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
|
||||
EventTypes []string
|
||||
|
||||
// If present, only apply this notification configuration to object names that
|
||||
// begin with this prefix.
|
||||
ObjectNamePrefix string
|
||||
|
||||
// An optional list of additional attributes to attach to each Cloud PubSub
|
||||
// message published for this notification subscription.
|
||||
CustomAttributes map[string]string
|
||||
|
||||
// The contents of the message payload.
|
||||
// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
|
||||
PayloadFormat string
|
||||
}
|
||||
|
||||
// Values for Notification.PayloadFormat.
|
||||
const (
|
||||
// Send no payload with notification messages.
|
||||
NoPayload = "NONE"
|
||||
|
||||
// Send object metadata as JSON with notification messages.
|
||||
JSONPayload = "JSON_API_V1"
|
||||
)
|
||||
|
||||
// Values for Notification.EventTypes.
|
||||
const (
|
||||
// Event that occurs when an object is successfully created.
|
||||
ObjectFinalizeEvent = "OBJECT_FINALIZE"
|
||||
|
||||
// Event that occurs when the metadata of an existing object changes.
|
||||
ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
|
||||
|
||||
// Event that occurs when an object is permanently deleted.
|
||||
ObjectDeleteEvent = "OBJECT_DELETE"
|
||||
|
||||
// Event that occurs when the live version of an object becomes an
|
||||
// archived version.
|
||||
ObjectArchiveEvent = "OBJECT_ARCHIVE"
|
||||
)
|
||||
|
||||
func toNotification(rn *raw.Notification) *Notification {
|
||||
n := &Notification{
|
||||
ID: rn.Id,
|
||||
EventTypes: rn.EventTypes,
|
||||
ObjectNamePrefix: rn.ObjectNamePrefix,
|
||||
CustomAttributes: rn.CustomAttributes,
|
||||
PayloadFormat: rn.PayloadFormat,
|
||||
}
|
||||
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
|
||||
return n
|
||||
}
|
||||
|
||||
func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification {
|
||||
n := &Notification{
|
||||
ID: pbn.GetName(),
|
||||
EventTypes: pbn.GetEventTypes(),
|
||||
ObjectNamePrefix: pbn.GetObjectNamePrefix(),
|
||||
CustomAttributes: pbn.GetCustomAttributes(),
|
||||
PayloadFormat: pbn.GetPayloadFormat(),
|
||||
}
|
||||
n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic)
|
||||
return n
|
||||
}
|
||||
|
||||
func toProtoNotification(n *Notification) *storagepb.NotificationConfig {
|
||||
return &storagepb.NotificationConfig{
|
||||
Name: n.ID,
|
||||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
|
||||
n.TopicProjectID, n.TopicID),
|
||||
EventTypes: n.EventTypes,
|
||||
ObjectNamePrefix: n.ObjectNamePrefix,
|
||||
CustomAttributes: n.CustomAttributes,
|
||||
PayloadFormat: n.PayloadFormat,
|
||||
}
|
||||
}
|
||||
|
||||
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
|
||||
|
||||
// parseNotificationTopic extracts the project and topic IDs from from the full
|
||||
// resource name returned by the service. If the name is malformed, it returns
|
||||
// "?" for both IDs.
|
||||
func parseNotificationTopic(nt string) (projectID, topicID string) {
|
||||
matches := topicRE.FindStringSubmatch(nt)
|
||||
if matches == nil {
|
||||
return "?", "?"
|
||||
}
|
||||
return matches[1], matches[2]
|
||||
}
|
||||
|
||||
func toRawNotification(n *Notification) *raw.Notification {
|
||||
return &raw.Notification{
|
||||
Id: n.ID,
|
||||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
|
||||
n.TopicProjectID, n.TopicID),
|
||||
EventTypes: n.EventTypes,
|
||||
ObjectNamePrefix: n.ObjectNamePrefix,
|
||||
CustomAttributes: n.CustomAttributes,
|
||||
PayloadFormat: string(n.PayloadFormat),
|
||||
}
|
||||
}
|
||||
|
||||
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
|
||||
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
|
||||
// returned Notification's ID can be used to refer to it.
|
||||
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if n.ID != "" {
|
||||
return nil, errors.New("storage: AddNotification: ID must not be set")
|
||||
}
|
||||
if n.TopicProjectID == "" {
|
||||
return nil, errors.New("storage: AddNotification: missing TopicProjectID")
|
||||
}
|
||||
if n.TopicID == "" {
|
||||
return nil, errors.New("storage: AddNotification: missing TopicID")
|
||||
}
|
||||
|
||||
opts := makeStorageOpts(false, b.retry, b.userProject)
|
||||
ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...)
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// Notifications returns all the Notifications configured for this bucket, as a map
|
||||
// indexed by notification ID.
|
||||
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
opts := makeStorageOpts(true, b.retry, b.userProject)
|
||||
n, err = b.c.tc.ListNotifications(ctx, b.name, opts...)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
|
||||
m := map[string]*Notification{}
|
||||
for _, rn := range rns {
|
||||
m[rn.Id] = toNotification(rn)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification {
|
||||
m := map[string]*Notification{}
|
||||
for _, n := range ns {
|
||||
m[n.Name] = toNotificationFromProto(n)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// DeleteNotification deletes the notification with the given ID.
|
||||
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
opts := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.DeleteNotification(ctx, b.name, id, opts...)
|
||||
}
|
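Editor's usage sketch (not part of the vendored diff): AddNotification above requires TopicProjectID, TopicID, and PayloadFormat, and rejects a preset ID. The bucket, project, and topic names below are placeholders.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Publish OBJECT_FINALIZE events for this bucket to a Pub/Sub topic.
	n, err := client.Bucket("my-bucket").AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-project",
		TopicID:        "my-topic",
		PayloadFormat:  storage.JSONPayload,
		EventTypes:     []string{storage.ObjectFinalizeEvent},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created notification %s", n.ID)
}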
vendor/cloud.google.com/go/storage/option.go (75 lines, generated, vendored, new file)
@@ -0,0 +1,75 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
)

// storageConfig contains the Storage client option configuration that can be
// set through storageClientOptions.
type storageConfig struct {
	useJSONforReads bool
	readAPIWasSet   bool
}

// newStorageConfig generates a new storageConfig with all the given
// storageClientOptions applied.
func newStorageConfig(opts ...option.ClientOption) storageConfig {
	var conf storageConfig
	for _, opt := range opts {
		if storageOpt, ok := opt.(storageClientOption); ok {
			storageOpt.ApplyStorageOpt(&conf)
		}
	}
	return conf
}

// A storageClientOption is an option for a Google Storage client.
type storageClientOption interface {
	option.ClientOption
	ApplyStorageOpt(*storageConfig)
}

// WithJSONReads is an option that may be passed to a Storage Client on creation.
// It sets the client to use the JSON API for object reads. Currently, the
// default API used for reads is XML.
// Setting this option is required to use the GenerationNotMatch condition.
//
// Note that when this option is set, reads will return a zero date for
// [ReaderObjectAttrs].LastModified and may return a different value for
// [ReaderObjectAttrs].CacheControl.
func WithJSONReads() option.ClientOption {
	return &withReadAPI{useJSON: true}
}

// WithXMLReads is an option that may be passed to a Storage Client on creation.
// It sets the client to use the XML API for object reads.
//
// This is the current default.
func WithXMLReads() option.ClientOption {
	return &withReadAPI{useJSON: false}
}

type withReadAPI struct {
	internaloption.EmbeddableAdapter
	useJSON bool
}

func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) {
	c.useJSONforReads = w.useJSON
	c.readAPIWasSet = true
}
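Editor's usage sketch (not part of the vendored diff): the read-API options above are passed at client construction time.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	// Opt object downloads into the JSON API; without this option the
	// client keeps the XML default.
	client, err := storage.NewClient(ctx, storage.WithJSONReads())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}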
vendor/cloud.google.com/go/storage/post_policy_v4.go (436 lines, generated, vendored, new file)
@@ -0,0 +1,436 @@
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PostPolicyV4Options are used to construct a signed post policy.
|
||||
// Please see https://cloud.google.com/storage/docs/xml-api/post-object
|
||||
// for reference about the fields.
|
||||
type PostPolicyV4Options struct {
|
||||
// GoogleAccessID represents the authorizer of the signed URL generation.
|
||||
// It is typically the Google service account client email address from
|
||||
// the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
|
||||
// Required.
|
||||
GoogleAccessID string
|
||||
|
||||
// PrivateKey is the Google service account private key. It is obtainable
|
||||
// from the Google Developers Console.
|
||||
// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
|
||||
// create a service account client ID or reuse one of your existing service account
|
||||
// credentials. Click on the "Generate new P12 key" to generate and download
|
||||
// a new private key. Once you download the P12 file, use the following command
|
||||
// to convert it into a PEM file.
|
||||
//
|
||||
// $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
|
||||
//
|
||||
// Provide the contents of the PEM file as a byte slice.
|
||||
// Exactly one of PrivateKey or SignBytes must be non-nil.
|
||||
PrivateKey []byte
|
||||
|
||||
// SignBytes is a function for implementing custom signing.
|
||||
//
|
||||
// Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined,
|
||||
// SignBytes will be ignored.
|
||||
// This SignBytes function expects the bytes it receives to be hashed, while
|
||||
// SignRawBytes accepts the raw bytes without hashing, allowing more flexibility.
|
||||
// Add the following to the top of your signing function to hash the bytes
|
||||
// to use SignRawBytes instead:
|
||||
// shaSum := sha256.Sum256(bytes)
|
||||
// bytes = shaSum[:]
|
||||
//
|
||||
SignBytes func(hashBytes []byte) (signature []byte, err error)
|
||||
|
||||
// SignRawBytes is a function for implementing custom signing. For example, if
|
||||
// your application is running on Google App Engine, you can use
|
||||
// appengine's internal signing function:
|
||||
// ctx := appengine.NewContext(request)
|
||||
// acc, _ := appengine.ServiceAccount(ctx)
|
||||
// &PostPolicyV4Options{
|
||||
// GoogleAccessID: acc,
|
||||
// SignRawBytes: func(b []byte) ([]byte, error) {
|
||||
// _, signedBytes, err := appengine.SignBytes(ctx, b)
|
||||
// return signedBytes, err
|
||||
// },
|
||||
// // etc.
|
||||
// })
|
||||
//
|
||||
// SignRawBytes is equivalent to the SignBytes field on SignedURLOptions;
|
||||
// that is, you may use the same signing function for the two.
|
||||
//
|
||||
// Exactly one of PrivateKey or SignRawBytes must be non-nil.
|
||||
SignRawBytes func(bytes []byte) (signature []byte, err error)
|
||||
|
||||
// Expires is the expiration time on the signed URL.
|
||||
// It must be a time in the future.
|
||||
// Required.
|
||||
Expires time.Time
|
||||
|
||||
// Style provides options for the type of URL to use. Options are
|
||||
// PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See
|
||||
// https://cloud.google.com/storage/docs/request-endpoints for details.
|
||||
// Optional.
|
||||
Style URLStyle
|
||||
|
||||
// Insecure when set indicates that the generated URL's scheme
|
||||
// will use "http" instead of "https" (default).
|
||||
// Optional.
|
||||
Insecure bool
|
||||
|
||||
// Fields specifies the attributes of a PostPolicyV4 request.
|
||||
// When Fields is non-nil, its attributes must match those that will
|
||||
// passed into field Conditions.
|
||||
// Optional.
|
||||
Fields *PolicyV4Fields
|
||||
|
||||
// The conditions that the uploaded file will be expected to conform to.
|
||||
// When used, the failure of an upload to satisfy a condition will result in
|
||||
// a 4XX status code, back with the message describing the problem.
|
||||
// Optional.
|
||||
Conditions []PostPolicyV4Condition
|
||||
|
||||
shouldHashSignBytes bool
|
||||
}
|
||||
|
||||
func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options {
|
||||
return &PostPolicyV4Options{
|
||||
GoogleAccessID: opts.GoogleAccessID,
|
||||
PrivateKey: opts.PrivateKey,
|
||||
SignBytes: opts.SignBytes,
|
||||
SignRawBytes: opts.SignRawBytes,
|
||||
Expires: opts.Expires,
|
||||
Style: opts.Style,
|
||||
Insecure: opts.Insecure,
|
||||
Fields: opts.Fields,
|
||||
Conditions: opts.Conditions,
|
||||
shouldHashSignBytes: opts.shouldHashSignBytes,
|
||||
}
|
||||
}
|
||||
|
||||
// PolicyV4Fields describes the attributes for a PostPolicyV4 request.
|
||||
type PolicyV4Fields struct {
|
||||
// ACL specifies the access control permissions for the object.
|
||||
// Optional.
|
||||
ACL string
|
||||
// CacheControl specifies the caching directives for the object.
|
||||
// Optional.
|
||||
CacheControl string
|
||||
// ContentType specifies the media type of the object.
|
||||
// Optional.
|
||||
ContentType string
|
||||
// ContentDisposition specifies how the file will be served back to requesters.
|
||||
// Optional.
|
||||
ContentDisposition string
|
||||
// ContentEncoding specifies the decompressive transcoding that the object.
|
||||
// This field is complementary to ContentType in that the file could be
|
||||
// compressed but ContentType specifies the file's original media type.
|
||||
// Optional.
|
||||
ContentEncoding string
|
||||
// Metadata specifies custom metadata for the object.
|
||||
// If any key doesn't begin with "x-goog-meta-", an error will be returned.
|
||||
// Optional.
|
||||
Metadata map[string]string
|
||||
// StatusCodeOnSuccess when set, specifies the status code that Cloud Storage
|
||||
// will serve back on successful upload of the object.
|
||||
// Optional.
|
||||
StatusCodeOnSuccess int
|
||||
// RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage
|
||||
// will serve back on successful upload of the object.
|
||||
// Optional.
|
||||
RedirectToURLOnSuccess string
|
||||
}
|
||||
|
||||
// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request.
|
||||
type PostPolicyV4 struct {
|
||||
// URL is the generated URL that the file upload will be made to.
|
||||
URL string
|
||||
// Fields specifies the generated key-values that the file uploader
|
||||
// must include in their multipart upload form.
|
||||
Fields map[string]string
|
||||
}
|
||||
|
||||
// PostPolicyV4Condition describes the constraints that the subsequent
|
||||
// object upload's multipart form fields will be expected to conform to.
|
||||
type PostPolicyV4Condition interface {
|
||||
isEmpty() bool
|
||||
json.Marshaler
|
||||
}
|
||||
|
||||
type startsWith struct {
|
||||
key, value string
|
||||
}
|
||||
|
||||
func (sw *startsWith) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal([]string{"starts-with", sw.key, sw.value})
|
||||
}
|
||||
func (sw *startsWith) isEmpty() bool {
|
||||
return sw.value == ""
|
||||
}
|
||||
|
||||
// ConditionStartsWith checks that an attributes starts with value.
|
||||
// An empty value will cause this condition to be ignored.
|
||||
func ConditionStartsWith(key, value string) PostPolicyV4Condition {
|
||||
return &startsWith{key, value}
|
||||
}
|
||||
|
||||
type contentLengthRangeCondition struct {
|
||||
start, end uint64
|
||||
}
|
||||
|
||||
func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal([]interface{}{"content-length-range", clr.start, clr.end})
|
||||
}
|
||||
func (clr *contentLengthRangeCondition) isEmpty() bool {
|
||||
return clr.start == 0 && clr.end == 0
|
||||
}
|
||||
|
||||
type singleValueCondition struct {
|
||||
name, value string
|
||||
}
|
||||
|
||||
func (svc *singleValueCondition) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]string{svc.name: svc.value})
|
||||
}
|
||||
func (svc *singleValueCondition) isEmpty() bool {
|
||||
return svc.value == ""
|
||||
}
|
||||
|
||||
// ConditionContentLengthRange constraints the limits that the
|
||||
// multipart upload's range header will be expected to be within.
|
||||
func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition {
|
||||
return &contentLengthRangeCondition{start, end}
|
||||
}
|
||||
|
||||
func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition {
|
||||
return &singleValueCondition{"success_action_redirect", redirectURL}
|
||||
}
|
||||
|
||||
func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition {
|
||||
svc := &singleValueCondition{name: "success_action_status"}
|
||||
if statusCode > 0 {
|
||||
svc.value = fmt.Sprintf("%d", statusCode)
|
||||
}
|
||||
return svc
|
||||
}
|
||||
|
||||
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
|
||||
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
|
||||
// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4
|
||||
// method which uses the Client's credentials to handle authentication.
|
||||
func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
|
||||
if bucket == "" {
|
||||
return nil, errors.New("storage: bucket must be non-empty")
|
||||
}
|
||||
if object == "" {
|
||||
return nil, errors.New("storage: object must be non-empty")
|
||||
}
|
||||
now := utcNow()
|
||||
if err := validatePostPolicyV4Options(opts, now); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var signingFn func(hashedBytes []byte) ([]byte, error)
|
||||
switch {
|
||||
case opts.SignRawBytes != nil:
|
||||
signingFn = opts.SignRawBytes
|
||||
case opts.shouldHashSignBytes:
|
||||
signingFn = opts.SignBytes
|
||||
case len(opts.PrivateKey) != 0:
|
||||
parsedRSAPrivKey, err := parseKey(opts.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signingFn = func(b []byte) ([]byte, error) {
|
||||
sum := sha256.Sum256(b)
|
||||
return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:])
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
|
||||
}
|
||||
|
||||
var descFields PolicyV4Fields
|
||||
if opts.Fields != nil {
|
||||
descFields = *opts.Fields
|
||||
}
|
||||
|
||||
if err := validateMetadata(descFields.Metadata); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Build the policy.
|
||||
conds := make([]PostPolicyV4Condition, len(opts.Conditions))
|
||||
copy(conds, opts.Conditions)
|
||||
conds = append(conds,
|
||||
// These are ordered lexicographically. Technically the order doesn't matter
|
||||
// for creating the policy, but we use this order to match the
|
||||
// cross-language conformance tests for this feature.
|
||||
&singleValueCondition{"acl", descFields.ACL},
|
||||
&singleValueCondition{"cache-control", descFields.CacheControl},
|
||||
&singleValueCondition{"content-disposition", descFields.ContentDisposition},
|
||||
&singleValueCondition{"content-encoding", descFields.ContentEncoding},
|
||||
&singleValueCondition{"content-type", descFields.ContentType},
|
||||
conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess),
|
||||
conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess),
|
||||
)
|
||||
|
||||
YYYYMMDD := now.Format(yearMonthDay)
|
||||
policyFields := map[string]string{
|
||||
"key": object,
|
||||
"x-goog-date": now.Format(iso8601),
|
||||
"x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request",
|
||||
"x-goog-algorithm": "GOOG4-RSA-SHA256",
|
||||
"acl": descFields.ACL,
|
||||
"cache-control": descFields.CacheControl,
|
||||
"content-disposition": descFields.ContentDisposition,
|
||||
"content-encoding": descFields.ContentEncoding,
|
||||
"content-type": descFields.ContentType,
|
||||
"success_action_redirect": descFields.RedirectToURLOnSuccess,
|
||||
}
|
||||
for key, value := range descFields.Metadata {
|
||||
conds = append(conds, &singleValueCondition{key, value})
|
||||
policyFields[key] = value
|
||||
}
|
||||
|
||||
// Following from the order expected by the conformance test cases,
|
||||
// hence manually inserting these fields in a specific order.
|
||||
conds = append(conds,
|
||||
&singleValueCondition{"bucket", bucket},
|
||||
&singleValueCondition{"key", object},
|
||||
&singleValueCondition{"x-goog-date", now.Format(iso8601)},
|
||||
&singleValueCondition{
|
||||
name: "x-goog-credential",
|
||||
value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request",
|
||||
},
|
||||
&singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"},
|
||||
)
|
||||
|
||||
nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions))
|
||||
for _, cond := range conds {
|
||||
if cond == nil || !cond.isEmpty() {
|
||||
nonEmptyConds = append(nonEmptyConds, cond)
|
||||
}
|
||||
}
|
||||
condsAsJSON, err := json.Marshal(map[string]interface{}{
|
||||
"conditions": nonEmptyConds,
|
||||
"expiration": opts.Expires.Format(time.RFC3339),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err)
|
||||
}
|
||||
|
||||
b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON)
|
||||
var signature []byte
|
||||
var signErr error
|
||||
|
||||
if opts.shouldHashSignBytes {
|
||||
// SignBytes expects hashed bytes as input instead of raw bytes, so we hash them
|
||||
shaSum := sha256.Sum256([]byte(b64Policy))
|
||||
signature, signErr = signingFn(shaSum[:])
|
||||
} else {
|
||||
signature, signErr = signingFn([]byte(b64Policy))
|
||||
}
|
||||
if signErr != nil {
|
||||
return nil, signErr
|
||||
}
|
||||
|
||||
policyFields["policy"] = b64Policy
|
||||
policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature)
|
||||
|
||||
// Construct the URL.
|
||||
scheme := "https"
|
||||
if opts.Insecure {
|
||||
scheme = "http"
|
||||
}
|
||||
path := opts.Style.path(bucket, "") + "/"
|
||||
u := &url.URL{
|
||||
Path: path,
|
||||
RawPath: pathEncodeV4(path),
|
||||
Host: opts.Style.host(bucket),
|
||||
Scheme: scheme,
|
||||
}
|
||||
|
||||
if descFields.StatusCodeOnSuccess > 0 {
|
||||
policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess)
|
||||
}
|
||||
|
||||
// Clear out fields with blanks values.
|
||||
for key, value := range policyFields {
|
||||
if value == "" {
|
||||
delete(policyFields, key)
|
||||
}
|
||||
}
|
||||
pp4 := &PostPolicyV4{
|
||||
Fields: policyFields,
|
||||
URL: u.String(),
|
||||
}
|
||||
return pp4, nil
|
||||
}
|
||||
|
||||
// validatePostPolicyV4Options checks that:
|
||||
// * GoogleAccessID is set
|
||||
// * either PrivateKey or SignRawBytes/SignBytes is set, but not both
|
||||
// * the deadline set in Expires is not in the past
|
||||
// * if Style is not set, it'll use PathStyle
|
||||
// * sets shouldHashSignBytes to true if opts.SignBytes should be used
|
||||
func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error {
|
||||
if opts == nil || opts.GoogleAccessID == "" {
|
||||
return errors.New("storage: missing required GoogleAccessID")
|
||||
}
|
||||
if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank {
|
||||
return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
|
||||
}
|
||||
if opts.Expires.Before(now) {
|
||||
return errors.New("storage: expecting Expires to be in the future")
|
||||
}
|
||||
if opts.Style == nil {
|
||||
opts.Style = PathStyle()
|
||||
}
|
||||
if opts.SignRawBytes == nil && opts.SignBytes != nil {
|
||||
opts.shouldHashSignBytes = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-",
|
||||
// otherwise it will return an error.
|
||||
func validateMetadata(hdrs map[string]string) (err error) {
|
||||
if len(hdrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
badKeys := make([]string, 0, len(hdrs))
|
||||
for key := range hdrs {
|
||||
if !strings.HasPrefix(key, "x-goog-meta-") {
|
||||
badKeys = append(badKeys, key)
|
||||
}
|
||||
}
|
||||
if len(badKeys) != 0 {
|
||||
err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", "))
|
||||
}
|
||||
return
|
||||
}
|
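Editor's usage sketch (not part of the vendored diff): GenerateSignedPostPolicyV4, defined in the file above, builds the URL and form fields for an unauthenticated browser upload. The bucket, object, service account email, and key.pem path below are placeholders; with an initialized Client, BucketHandle.GenerateSignedPostPolicyV4 can supply credentials instead of an explicit private key.

package main

import (
	"log"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	// Placeholder key material: a PEM-encoded service account private key.
	pemKey, err := os.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}

	policy, err := storage.GenerateSignedPostPolicyV4("my-bucket", "uploads/object.txt", &storage.PostPolicyV4Options{
		GoogleAccessID: "sa@my-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Expires:        time.Now().Add(15 * time.Minute),
		Fields: &storage.PolicyV4Fields{
			StatusCodeOnSuccess: 200,
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// policy.URL is the form action; policy.Fields are the hidden form fields
	// the uploader must submit alongside the file part.
	log.Println(policy.URL, policy.Fields)
}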
vendor/cloud.google.com/go/storage/reader.go (274 lines, generated, vendored, new file)
@@ -0,0 +1,274 @@
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
)
|
||||
|
||||
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// ReaderObjectAttrs are attributes about the object being read. These are populated
|
||||
// during the New call. This struct only holds a subset of object attributes: to
|
||||
// get the full set of attributes, use ObjectHandle.Attrs.
|
||||
//
|
||||
// Each field is read-only.
|
||||
type ReaderObjectAttrs struct {
|
||||
// Size is the length of the object's content.
|
||||
Size int64
|
||||
|
||||
// StartOffset is the byte offset within the object
|
||||
// from which reading begins.
|
||||
// This value is only non-zero for range requests.
|
||||
StartOffset int64
|
||||
|
||||
// ContentType is the MIME type of the object's content.
|
||||
ContentType string
|
||||
|
||||
// ContentEncoding is the encoding of the object's content.
|
||||
ContentEncoding string
|
||||
|
||||
// CacheControl specifies whether and for how long browser and Internet
|
||||
// caches are allowed to cache your objects.
|
||||
CacheControl string
|
||||
|
||||
// LastModified is the time that the object was last modified.
|
||||
LastModified time.Time
|
||||
|
||||
// Generation is the generation number of the object's content.
|
||||
Generation int64
|
||||
|
||||
// Metageneration is the version of the metadata for this object at
|
||||
// this generation. This field is used for preconditions and for
|
||||
// detecting changes in metadata. A metageneration number is only
|
||||
// meaningful in the context of a particular generation of a
|
||||
// particular object.
|
||||
Metageneration int64
|
||||
}
|
||||
|
||||
// NewReader creates a new Reader to read the contents of the
|
||||
// object.
|
||||
// ErrObjectNotExist will be returned if the object is not found.
|
||||
//
|
||||
// The caller must call Close on the returned Reader when done reading.
|
||||
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
|
||||
return o.NewRangeReader(ctx, 0, -1)
|
||||
}
|
||||
|
||||
// NewRangeReader reads part of an object, reading at most length bytes
|
||||
// starting at the given offset. If length is negative, the object is read
|
||||
// until the end. If offset is negative, the object is read abs(offset) bytes
|
||||
// from the end, and length must also be negative to indicate all remaining
|
||||
// bytes will be read.
|
||||
//
|
||||
// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies
|
||||
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
|
||||
// that file will be served back whole, regardless of the requested range as
|
||||
// Google Cloud Storage dictates.
|
||||
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if offset < 0 && length >= 0 {
|
||||
return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset)
|
||||
}
|
||||
if o.conds != nil {
|
||||
if err := o.conds.validate("NewRangeReader"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
opts := makeStorageOpts(true, o.retry, o.userProject)
|
||||
|
||||
params := &newRangeReaderParams{
|
||||
bucket: o.bucket,
|
||||
object: o.object,
|
||||
gen: o.gen,
|
||||
offset: offset,
|
||||
length: length,
|
||||
encryptionKey: o.encryptionKey,
|
||||
conds: o.conds,
|
||||
readCompressed: o.readCompressed,
|
||||
}
|
||||
|
||||
r, err = o.c.tc.NewRangeReader(ctx, params, opts...)
|
||||
|
||||
return r, err
|
||||
}
|
||||
|
||||
// decompressiveTranscoding returns true if the request was served decompressed
|
||||
// and different than its original storage form. This happens when the "Content-Encoding"
|
||||
// header is "gzip".
|
||||
// See:
|
||||
// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
|
||||
// - https://github.com/googleapis/google-cloud-go/issues/1800
|
||||
func decompressiveTranscoding(res *http.Response) bool {
|
||||
// Decompressive Transcoding.
|
||||
return res.Header.Get("Content-Encoding") == "gzip" ||
|
||||
res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip"
|
||||
}
|
||||
|
||||
func uncompressedByServer(res *http.Response) bool {
|
||||
// If the data is stored as gzip but is not encoded as gzip, then it
|
||||
// was uncompressed by the server.
|
||||
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
|
||||
res.Header.Get("Content-Encoding") != "gzip"
|
||||
}
|
||||
|
||||
// parseCRC32c parses the crc32c hash from the X-Goog-Hash header.
|
||||
// It can parse headers in the form [crc32c=xxx md5=xxx] (XML responses) or the
|
||||
// form [crc32c=xxx,md5=xxx] (JSON responses). The md5 hash is ignored.
|
||||
func parseCRC32c(res *http.Response) (uint32, bool) {
|
||||
const prefix = "crc32c="
|
||||
for _, spec := range res.Header["X-Goog-Hash"] {
|
||||
values := strings.Split(spec, ",")
|
||||
|
||||
for _, v := range values {
|
||||
if strings.HasPrefix(v, prefix) {
|
||||
c, err := decodeUint32(v[len(prefix):])
|
||||
if err == nil {
|
||||
return c, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// setConditionsHeaders sets precondition request headers for downloads
|
||||
// using the XML API. It assumes that the conditions have been validated.
|
||||
func setConditionsHeaders(headers http.Header, conds *Conditions) error {
|
||||
if conds == nil {
|
||||
return nil
|
||||
}
|
||||
if conds.MetagenerationMatch != 0 {
|
||||
headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch))
|
||||
}
|
||||
switch {
|
||||
case conds.GenerationMatch != 0:
|
||||
headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch))
|
||||
case conds.DoesNotExist:
|
||||
headers.Set("x-goog-if-generation-match", "0")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wrap a request to look similar to an apiary library request, in order to
|
||||
// be used by run().
|
||||
type readerRequestWrapper struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (w *readerRequestWrapper) Header() http.Header {
|
||||
return w.req.Header
|
||||
}
|
||||
|
||||
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
||||
|
||||
// Reader reads a Cloud Storage object.
|
||||
// It implements io.Reader.
|
||||
//
|
||||
// Typically, a Reader computes the CRC of the downloaded content and compares it to
|
||||
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
|
||||
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
|
||||
type Reader struct {
|
||||
Attrs ReaderObjectAttrs
|
||||
seen, remain, size int64
|
||||
checkCRC bool // should we check the CRC?
|
||||
wantCRC uint32 // the CRC32c value the server sent in the header
|
||||
gotCRC uint32 // running crc
|
||||
|
||||
reader io.ReadCloser
|
||||
}
|
||||
|
||||
// Close closes the Reader. It must be called when done reading.
|
||||
func (r *Reader) Close() error {
|
||||
return r.reader.Close()
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.reader.Read(p)
|
||||
if r.remain != -1 {
|
||||
r.remain -= int64(n)
|
||||
}
|
||||
if r.checkCRC {
|
||||
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
|
||||
// Check CRC here. It would be natural to check it in Close, but
|
||||
// everybody defers Close on the assumption that it doesn't return
|
||||
// anything worth looking at.
|
||||
if err == io.EOF {
|
||||
if r.gotCRC != r.wantCRC {
|
||||
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
|
||||
r.gotCRC, r.wantCRC)
|
||||
}
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Size returns the size of the object in bytes.
|
||||
// The returned value is always the same and is not affected by
|
||||
// calls to Read or Close.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.Size.
|
||||
func (r *Reader) Size() int64 {
|
||||
return r.Attrs.Size
|
||||
}
|
||||
|
||||
// Remain returns the number of bytes left to read, or -1 if unknown.
|
||||
func (r *Reader) Remain() int64 {
|
||||
return r.remain
|
||||
}
|
||||
|
||||
// ContentType returns the content type of the object.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.ContentType.
|
||||
func (r *Reader) ContentType() string {
|
||||
return r.Attrs.ContentType
|
||||
}
|
||||
|
||||
// ContentEncoding returns the content encoding of the object.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.ContentEncoding.
|
||||
func (r *Reader) ContentEncoding() string {
|
||||
return r.Attrs.ContentEncoding
|
||||
}
|
||||
|
||||
// CacheControl returns the cache control of the object.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.CacheControl.
|
||||
func (r *Reader) CacheControl() string {
|
||||
return r.Attrs.CacheControl
|
||||
}
|
||||
|
||||
// LastModified returns the value of the Last-Modified header.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.LastModified.
|
||||
func (r *Reader) LastModified() (time.Time, error) {
|
||||
return r.Attrs.LastModified, nil
|
||||
}
|
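As a point of reference only (not part of the vendored diff): a minimal, hedged sketch of how the range-reader API above is typically consumed. The bucket name, object name, and byte range are hypothetical placeholders, and the client is assumed to pick up default application credentials.

```go
// Illustrative only; not part of the vendored diff.
// Reads the first KiB of a hypothetical object using ObjectHandle.NewRangeReader.
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // assumes default credentials
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Offset 0, length 1024; a negative length would read to the end of the object.
	r, err := client.Bucket("example-bucket").Object("example-object").NewRangeReader(ctx, 0, 1024)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	data, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes of %d total\n", len(data), r.Attrs.Size)
}
```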
2221
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
Normal file
File diff suppressed because it is too large
30067
vendor/cloud.google.com/go/storage/storage.replay
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
273
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
Normal file
|
@@ -0,0 +1,273 @@
|
|||
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// A Writer writes a Cloud Storage object.
|
||||
type Writer struct {
|
||||
// ObjectAttrs are optional attributes to set on the object. Any attributes
|
||||
// must be initialized before the first Write call. Nil or zero-valued
|
||||
// attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
|
||||
// to true in addition to setting the Writer's CRC32C field, because zero
|
||||
// is a valid CRC and normally a zero would not be transmitted.
|
||||
// If a CRC32C is sent, and the data written does not match the checksum,
|
||||
// the write will be rejected.
|
||||
//
|
||||
// Note: SendCRC32C must be set to true BEFORE the first call to
|
||||
// Writer.Write() in order to send the checksum. If it is set after that
|
||||
// point, the checksum will be ignored.
|
||||
SendCRC32C bool
|
||||
|
||||
// ChunkSize controls the maximum number of bytes of the object that the
|
||||
// Writer will attempt to send to the server in a single request. Objects
|
||||
// smaller than the size will be sent in a single request, while larger
|
||||
// objects will be split over multiple requests. The value will be rounded up
|
||||
// to the nearest multiple of 256K. The default ChunkSize is 16MiB.
|
||||
//
|
||||
// Each Writer will internally allocate a buffer of size ChunkSize. This is
|
||||
// used to buffer input data and allow for the input to be sent again if a
|
||||
// request must be retried.
|
||||
//
|
||||
// If you upload small objects (< 16MiB), you should set ChunkSize
|
||||
// to a value slightly larger than the objects' sizes to avoid memory bloat.
|
||||
// This is especially important if you are uploading many small objects
|
||||
// concurrently. See
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size
|
||||
// for more information about performance trade-offs related to ChunkSize.
|
||||
//
|
||||
// If ChunkSize is set to zero, chunking will be disabled and the object will
|
||||
// be uploaded in a single request without the use of a buffer. This will
|
||||
// further reduce memory used during uploads, but will also prevent the writer
|
||||
// from retrying in case of a transient error from the server or resuming an
|
||||
// upload that fails midway through, since the buffer is required in order to
|
||||
// retry the failed request.
|
||||
//
|
||||
// ChunkSize must be set before the first Write call.
|
||||
ChunkSize int
|
||||
|
||||
// ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk
|
||||
// resumable uploads.
|
||||
//
|
||||
// For uploads of larger files, the Writer will attempt to retry if the
|
||||
// request to upload a particular chunk fails with a transient error.
|
||||
// If a single chunk has been attempting to upload for longer than this
|
||||
// deadline and the request fails, it will no longer be retried, and the error
|
||||
// will be returned to the caller. This is only applicable for files which are
|
||||
// large enough to require a multi-chunk resumable upload. The default value
|
||||
// is 32s. Users may want to pick a longer deadline if they are using larger
|
||||
// values for ChunkSize or if they expect to have a slow or unreliable
|
||||
// internet connection.
|
||||
//
|
||||
// To set a deadline on the entire upload, use context timeout or
|
||||
// cancellation.
|
||||
ChunkRetryDeadline time.Duration
|
||||
|
||||
// ProgressFunc can be used to monitor the progress of a large write
|
||||
// operation. If ProgressFunc is not nil and writing requires multiple
|
||||
// calls to the underlying service (see
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
|
||||
// then ProgressFunc will be invoked after each call with the number of bytes of
|
||||
// content copied so far.
|
||||
//
|
||||
// ProgressFunc should return quickly without blocking.
|
||||
ProgressFunc func(int64)
|
||||
|
||||
ctx context.Context
|
||||
o *ObjectHandle
|
||||
|
||||
opened bool
|
||||
pw *io.PipeWriter
|
||||
|
||||
donec chan struct{} // closed after err and obj are set.
|
||||
obj *ObjectAttrs
|
||||
|
||||
mu sync.Mutex
|
||||
err error
|
||||
}
|
||||
|
||||
// Write appends to w. It implements the io.Writer interface.
|
||||
//
|
||||
// Since writes happen asynchronously, Write may return a nil
|
||||
// error even though the write failed (or will fail). Always
|
||||
// use the error returned from Writer.Close to determine if
|
||||
// the upload was successful.
|
||||
//
|
||||
// Writes will be retried on transient errors from the server, unless
|
||||
// Writer.ChunkSize has been set to zero.
|
||||
func (w *Writer) Write(p []byte) (n int, err error) {
|
||||
w.mu.Lock()
|
||||
werr := w.err
|
||||
w.mu.Unlock()
|
||||
if werr != nil {
|
||||
return 0, werr
|
||||
}
|
||||
if !w.opened {
|
||||
if err := w.openWriter(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
n, err = w.pw.Write(p)
|
||||
if err != nil {
|
||||
w.mu.Lock()
|
||||
werr := w.err
|
||||
w.mu.Unlock()
|
||||
// Preserve existing functionality that when context is canceled, Write will return
|
||||
// context.Canceled instead of "io: read/write on closed pipe". This hides the
|
||||
// pipe implementation detail from users and makes Write seem as though it's an RPC.
|
||||
if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) {
|
||||
return n, werr
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close completes the write operation and flushes any buffered data.
|
||||
// If Close doesn't return an error, metadata about the written object
|
||||
// can be retrieved by calling Attrs.
|
||||
func (w *Writer) Close() error {
|
||||
if !w.opened {
|
||||
if err := w.openWriter(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Closing either the read or write causes the entire pipe to close.
|
||||
if err := w.pw.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-w.donec
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.err
|
||||
}
|
||||
|
||||
func (w *Writer) openWriter() (err error) {
|
||||
if err := w.validateWriteAttrs(); err != nil {
|
||||
return err
|
||||
}
|
||||
if w.o.gen != defaultGen {
|
||||
return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen)
|
||||
}
|
||||
|
||||
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
|
||||
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
|
||||
params := &openWriterParams{
|
||||
ctx: w.ctx,
|
||||
chunkSize: w.ChunkSize,
|
||||
chunkRetryDeadline: w.ChunkRetryDeadline,
|
||||
bucket: w.o.bucket,
|
||||
attrs: &w.ObjectAttrs,
|
||||
conds: w.o.conds,
|
||||
encryptionKey: w.o.encryptionKey,
|
||||
sendCRC32C: w.SendCRC32C,
|
||||
donec: w.donec,
|
||||
setError: w.error,
|
||||
progress: w.progress,
|
||||
setObj: func(o *ObjectAttrs) { w.obj = o },
|
||||
}
|
||||
if err := w.ctx.Err(); err != nil {
|
||||
return err // short-circuit
|
||||
}
|
||||
w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.opened = true
|
||||
go w.monitorCancel()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// monitorCancel is intended to be used as a background goroutine. It monitors the
|
||||
// context, and when it observes that the context has been canceled, it manually
|
||||
// closes things that do not take a context.
|
||||
func (w *Writer) monitorCancel() {
|
||||
select {
|
||||
case <-w.ctx.Done():
|
||||
w.mu.Lock()
|
||||
werr := w.ctx.Err()
|
||||
w.err = werr
|
||||
w.mu.Unlock()
|
||||
|
||||
// Closing either the read or write causes the entire pipe to close.
|
||||
w.CloseWithError(werr)
|
||||
case <-w.donec:
|
||||
}
|
||||
}
|
||||
|
||||
// CloseWithError aborts the write operation with the provided error.
|
||||
// CloseWithError always returns nil.
|
||||
//
|
||||
// Deprecated: cancel the context passed to NewWriter instead.
|
||||
func (w *Writer) CloseWithError(err error) error {
|
||||
if !w.opened {
|
||||
return nil
|
||||
}
|
||||
return w.pw.CloseWithError(err)
|
||||
}
|
||||
|
||||
// Attrs returns metadata about a successfully-written object.
|
||||
// It's only valid to call it after Close returns nil.
|
||||
func (w *Writer) Attrs() *ObjectAttrs {
|
||||
return w.obj
|
||||
}
|
||||
|
||||
func (w *Writer) validateWriteAttrs() error {
|
||||
attrs := w.ObjectAttrs
|
||||
// Check the developer didn't change the object Name (this is unfortunate, but
|
||||
// we don't want to store an object under the wrong name).
|
||||
if attrs.Name != w.o.object {
|
||||
return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
|
||||
}
|
||||
if !utf8.ValidString(attrs.Name) {
|
||||
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
|
||||
}
|
||||
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
|
||||
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
|
||||
}
|
||||
if w.ChunkSize < 0 {
|
||||
return errors.New("storage: Writer.ChunkSize must be non-negative")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// progress is a convenience wrapper that reports write progress to the Writer
|
||||
// ProgressFunc if it is set and progress is non-zero.
|
||||
func (w *Writer) progress(p int64) {
|
||||
if w.ProgressFunc != nil && p != 0 {
|
||||
w.ProgressFunc(p)
|
||||
}
|
||||
}
|
||||
|
||||
// error acquires the Writer's lock, sets the Writer's err to the given error,
|
||||
// then relinquishes the lock.
|
||||
func (w *Writer) error(err error) {
|
||||
w.mu.Lock()
|
||||
w.err = err
|
||||
w.mu.Unlock()
|
||||
}
|
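Again as a hedged reference only (not part of the diff): a minimal sketch of the Writer flow defined above, showing ChunkSize, SendCRC32C, and the fact that Close, not Write, reports the real outcome of the upload. The bucket and object names and the payload are made-up placeholders.

```go
// Illustrative only; not part of the vendored diff.
package main

import (
	"context"
	"hash/crc32"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // assumes default credentials
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	data := []byte("hello, world")

	w := client.Bucket("example-bucket").Object("example-object").NewWriter(ctx)
	w.ChunkSize = 256 << 10 // small upload: a single 256 KiB chunk is plenty
	w.SendCRC32C = true     // must be set before the first Write for the CRC to be sent
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))

	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	// Close flushes buffered data and reports the real result of the upload;
	// Write may return nil even when the upload ultimately fails.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %q as generation %d", w.Attrs().Name, w.Attrs().Generation)
}
```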
31
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
|
@@ -3,8 +3,7 @@
|
|||
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
||||
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
|
||||
high-quality hashing algorithm that is much faster than anything in the Go
|
||||
standard library.
|
||||
|
||||
|
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
|
|||
func (*Digest) Sum64() uint64
|
||||
```
|
||||
|
||||
This implementation provides a fast pure-Go implementation and an even faster
|
||||
assembly implementation for amd64.
|
||||
The package is written with optimized pure Go and also contains even faster
|
||||
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
|
||||
opts into using the Go code even on those architectures.
|
||||
|
||||
[xxHash]: http://cyan4973.github.io/xxHash/
|
||||
|
||||
## Compatibility
|
||||
|
||||
|
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
|
|||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64.
|
||||
|
||||
| input size | purego | asm |
|
||||
| --- | --- | --- |
|
||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
||||
| input size | purego | asm |
|
||||
| ---------- | --------- | --------- |
|
||||
| 4 B | 1.3 GB/s | 1.2 GB/s |
|
||||
| 16 B | 2.9 GB/s | 3.5 GB/s |
|
||||
| 100 B | 6.9 GB/s | 8.1 GB/s |
|
||||
| 4 KB | 11.7 GB/s | 16.7 GB/s |
|
||||
| 10 MB | 12.0 GB/s | 17.3 GB/s |
|
||||
|
||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
||||
the following commands under Go 1.11.2:
|
||||
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
|
||||
CPU using the following commands under Go 1.19.2:
|
||||
|
||||
```
|
||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
|
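For orientation, a small hedged sketch of the xxhash API the README above documents: one-shot Sum64/Sum64String and the streaming Digest. The input strings are arbitrary examples, not anything taken from this repository.

```go
// Illustrative only; not part of the vendored diff.
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming hashing with a Digest (implements hash.Hash64).
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Printf("%016x\n", d.Sum64())
}
```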
10
vendor/github.com/cespare/xxhash/v2/testall.sh
generated
vendored
Normal file
|
@@ -0,0 +1,10 @@
|
|||
#!/bin/bash
|
||||
set -eu -o pipefail
|
||||
|
||||
# Small convenience script for running the tests with various combinations of
|
||||
# arch/tags. This assumes we're running on amd64 and have qemu available.
|
||||
|
||||
go test ./...
|
||||
go test -tags purego ./...
|
||||
GOARCH=arm64 go test
|
||||
GOARCH=arm64 go test -tags purego
|
47
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
|
@@ -16,19 +16,11 @@ const (
|
|||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array of the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
|
@@ -50,10 +42,10 @@ func New() *Digest {
|
|||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = prime1v + prime2
|
||||
d.v1 = primes[0] + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -prime1v
|
||||
d.v4 = -primes[0]
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
|
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
|||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(d.mem[d.n:], b)
|
||||
copy(memleft, b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(d.mem[d.n:], b)
|
||||
c := copy(memleft, b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[32-d.n:]
|
||||
b = b[c:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
|
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
|
|||
|
||||
h += d.total
|
||||
|
||||
i, end := 0, d.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(d.mem[i:i+8]))
|
||||
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(d.mem[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
|
|
308
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
|
@@ -1,215 +1,209 @@
|
|||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// DI prime4v
|
||||
// Registers:
|
||||
#define h AX
|
||||
#define d AX
|
||||
#define p SI // pointer to advance through b
|
||||
#define n DX
|
||||
#define end BX // loop end
|
||||
#define v1 R8
|
||||
#define v2 R9
|
||||
#define v3 R10
|
||||
#define v4 R11
|
||||
#define x R12
|
||||
#define prime1 R13
|
||||
#define prime2 R14
|
||||
#define prime4 DI
|
||||
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
#define round(acc, x) \
|
||||
IMULQ prime2, x \
|
||||
ADDQ x, acc \
|
||||
ROLQ $31, acc \
|
||||
IMULQ prime1, acc
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ DI, acc
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
IMULQ prime2, x \
|
||||
ROLQ $31, x \
|
||||
IMULQ prime1, x
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and x.
|
||||
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
XORQ x, acc \
|
||||
IMULQ prime1, acc \
|
||||
ADDQ prime4, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||
// to process.
|
||||
#define blockLoop() \
|
||||
loop: \
|
||||
MOVQ +0(p), x \
|
||||
round(v1, x) \
|
||||
MOVQ +8(p), x \
|
||||
round(v2, x) \
|
||||
MOVQ +16(p), x \
|
||||
round(v3, x) \
|
||||
MOVQ +24(p), x \
|
||||
round(v4, x) \
|
||||
ADDQ $32, p \
|
||||
CMPQ p, end \
|
||||
JLE loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), DI
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·primes+24(SB), prime4
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
MOVQ b_base+0(FP), p
|
||||
MOVQ b_len+8(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
SUBQ $32, end
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
CMPQ n, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
MOVQ prime1, v1
|
||||
ADDQ prime2, v1
|
||||
MOVQ prime2, v2
|
||||
XORQ v3, v3
|
||||
XORQ v4, v4
|
||||
SUBQ prime1, v4
|
||||
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
blockLoop()
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
MOVQ v1, h
|
||||
ROLQ $1, h
|
||||
MOVQ v2, x
|
||||
ROLQ $7, x
|
||||
ADDQ x, h
|
||||
MOVQ v3, x
|
||||
ROLQ $12, x
|
||||
ADDQ x, h
|
||||
MOVQ v4, x
|
||||
ROLQ $18, x
|
||||
ADDQ x, h
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
MOVQ ·primes+32(SB), h
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
ADDQ n, h
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
ADDQ $24, end
|
||||
CMPQ p, end
|
||||
JG try4
|
||||
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
loop8:
|
||||
MOVQ (p), x
|
||||
ADDQ $8, p
|
||||
round0(x)
|
||||
XORQ x, h
|
||||
ROLQ $27, h
|
||||
IMULQ prime1, h
|
||||
ADDQ prime4, h
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
CMPQ p, end
|
||||
JLE loop8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ DI, AX
|
||||
try4:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JG try1
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
MOVL (p), x
|
||||
ADDQ $4, p
|
||||
IMULQ prime1, x
|
||||
XORQ x, h
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
ROLQ $23, h
|
||||
IMULQ prime2, h
|
||||
ADDQ ·primes+16(SB), h
|
||||
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
try1:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
loop1:
|
||||
MOVBQZX (p), x
|
||||
ADDQ $1, p
|
||||
IMULQ ·primes+32(SB), x
|
||||
XORQ x, h
|
||||
ROLQ $11, h
|
||||
IMULQ prime1, h
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
CMPQ p, end
|
||||
JL loop1
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
MOVQ h, x
|
||||
SHRQ $33, x
|
||||
XORQ x, h
|
||||
IMULQ prime2, h
|
||||
MOVQ h, x
|
||||
SHRQ $29, x
|
||||
XORQ x, h
|
||||
IMULQ ·primes+16(SB), h
|
||||
MOVQ h, x
|
||||
SHRQ $32, x
|
||||
XORQ x, h
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
MOVQ h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the d pointer.
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
MOVQ b_base+8(FP), p
|
||||
MOVQ b_len+16(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
SUBQ $32, end
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ d+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
MOVQ s+0(FP), d
|
||||
MOVQ 0(d), v1
|
||||
MOVQ 8(d), v2
|
||||
MOVQ 16(d), v3
|
||||
MOVQ 24(d), v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
blockLoop()
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
MOVQ v1, 0(d)
|
||||
MOVQ v2, 8(d)
|
||||
MOVQ v3, 16(d)
|
||||
MOVQ v4, 24(d)
|
||||
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
// The number of bytes written is p minus the old base pointer.
|
||||
SUBQ b_base+8(FP), p
|
||||
MOVQ p, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
|
183
vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
generated
vendored
Normal file
|
@@ -0,0 +1,183 @@
|
|||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define digest R1
|
||||
#define h R2 // return value
|
||||
#define p R3 // input pointer
|
||||
#define n R4 // input length
|
||||
#define nblocks R5 // n / 32
|
||||
#define prime1 R7
|
||||
#define prime2 R8
|
||||
#define prime3 R9
|
||||
#define prime4 R10
|
||||
#define prime5 R11
|
||||
#define v1 R12
|
||||
#define v2 R13
|
||||
#define v3 R14
|
||||
#define v4 R15
|
||||
#define x1 R20
|
||||
#define x2 R21
|
||||
#define x3 R22
|
||||
#define x4 R23
|
||||
|
||||
#define round(acc, x) \
|
||||
MADD prime2, acc, x, acc \
|
||||
ROR $64-31, acc \
|
||||
MUL prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
MUL prime2, x \
|
||||
ROR $64-31, x \
|
||||
MUL prime1, x
|
||||
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
EOR x, acc \
|
||||
MADD acc, prime4, prime1, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||
#define blockLoop() \
|
||||
LSR $5, n, nblocks \
|
||||
PCALIGN $16 \
|
||||
loop: \
|
||||
LDP.P 16(p), (x1, x2) \
|
||||
LDP.P 16(p), (x3, x4) \
|
||||
round(v1, x1) \
|
||||
round(v2, x2) \
|
||||
round(v3, x3) \
|
||||
round(v4, x4) \
|
||||
SUB $1, nblocks \
|
||||
CBNZ nblocks, loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
LDP b_base+0(FP), (p, n)
|
||||
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
LDP ·primes+16(SB), (prime3, prime4)
|
||||
MOVD ·primes+32(SB), prime5
|
||||
|
||||
CMP $32, n
|
||||
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||
BLT afterLoop
|
||||
|
||||
ADD prime1, prime2, v1
|
||||
MOVD prime2, v2
|
||||
MOVD $0, v3
|
||||
NEG prime1, v4
|
||||
|
||||
blockLoop()
|
||||
|
||||
ROR $64-1, v1, x1
|
||||
ROR $64-7, v2, x2
|
||||
ADD x1, x2
|
||||
ROR $64-12, v3, x3
|
||||
ROR $64-18, v4, x4
|
||||
ADD x3, x4
|
||||
ADD x2, x4, h
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
afterLoop:
|
||||
ADD n, h
|
||||
|
||||
TBZ $4, n, try8
|
||||
LDP.P 16(p), (x1, x2)
|
||||
|
||||
round0(x1)
|
||||
|
||||
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||
// rotated register) is worth a small but measurable speedup for small
|
||||
// inputs.
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
round0(x2)
|
||||
ROR $64-27, h
|
||||
EOR x2 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try8:
|
||||
TBZ $3, n, try4
|
||||
MOVD.P 8(p), x1
|
||||
|
||||
round0(x1)
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try4:
|
||||
TBZ $2, n, try2
|
||||
MOVWU.P 4(p), x2
|
||||
|
||||
MUL prime1, x2
|
||||
ROR $64-23, h
|
||||
EOR x2 @> 64-23, h, h
|
||||
MADD h, prime3, prime2, h
|
||||
|
||||
try2:
|
||||
TBZ $1, n, try1
|
||||
MOVHU.P 2(p), x3
|
||||
AND $255, x3, x1
|
||||
LSR $8, x3, x2
|
||||
|
||||
MUL prime5, x1
|
||||
ROR $64-11, h
|
||||
EOR x1 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
MUL prime5, x2
|
||||
ROR $64-11, h
|
||||
EOR x2 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
try1:
|
||||
TBZ $0, n, finalize
|
||||
MOVBU (p), x4
|
||||
|
||||
MUL prime5, x4
|
||||
ROR $64-11, h
|
||||
EOR x4 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
finalize:
|
||||
EOR h >> 33, h
|
||||
MUL prime2, h
|
||||
EOR h >> 29, h
|
||||
MUL prime3, h
|
||||
EOR h >> 32, h
|
||||
|
||||
MOVD h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
|
||||
// Load state. Assume v[1-4] are stored contiguously.
|
||||
MOVD d+0(FP), digest
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 16(digest), (v3, v4)
|
||||
|
||||
LDP b_base+8(FP), (p, n)
|
||||
|
||||
blockLoop()
|
||||
|
||||
// Store updated state.
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v3, v4), 16(digest)
|
||||
|
||||
BIC $31, n
|
||||
MOVD n, ret+32(FP)
|
||||
RET
|
|
@@ -1,3 +1,5 @@
|
|||
//go:build (amd64 || arm64) && !appengine && gc && !purego
|
||||
// +build amd64 arm64
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
22
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
|
@@ -1,4 +1,5 @@
|
|||
// +build !amd64 appengine !gc purego
|
||||
//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
||||
// +build !amd64,!arm64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
|
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
|
|||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v1 := primes[0] + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
v4 := -primes[0]
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
|
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
|
|||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
b = b[4:]
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
|
|
1
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
|
@@ -1,3 +1,4 @@
|
|||
//go:build appengine
|
||||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
|
3
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
|
@@ -1,3 +1,4 @@
|
|||
//go:build !appengine
|
||||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
|
@@ -11,7 +12,7 @@ import (
|
|||
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// XxxString functions unnecessary by realizing that calls such as
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
|
||||
// If that happens, even if we keep these functions they can be replaced with
|
||||
// the trivial safe code.
|
||||
|
||||
|
|
524
vendor/github.com/golang/protobuf/jsonpb/decode.go
generated
vendored
Normal file
|
@@ -0,0 +1,524 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jsonpb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapJSONUnmarshalV2 = false
|
||||
|
||||
// UnmarshalNext unmarshals the next JSON object from d into m.
|
||||
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
|
||||
return new(Unmarshaler).UnmarshalNext(d, m)
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals a JSON object from r into m.
|
||||
func Unmarshal(r io.Reader, m proto.Message) error {
|
||||
return new(Unmarshaler).Unmarshal(r, m)
|
||||
}
|
||||
|
||||
// UnmarshalString unmarshals a JSON object from s into m.
|
||||
func UnmarshalString(s string, m proto.Message) error {
|
||||
return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
|
||||
}
|
||||
|
||||
// Unmarshaler is a configurable object for converting from a JSON
|
||||
// representation to a protocol buffer object.
|
||||
type Unmarshaler struct {
|
||||
// AllowUnknownFields specifies whether to allow messages to contain
|
||||
// unknown JSON fields, as opposed to failing to unmarshal.
|
||||
AllowUnknownFields bool
|
||||
|
||||
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
|
||||
// If unset, the global registry is used by default.
|
||||
AnyResolver AnyResolver
|
||||
}
|
||||
|
||||
// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
|
||||
// they are unmarshaled from JSON. Messages that implement this should also
|
||||
// implement JSONPBMarshaler so that the custom format can be produced.
|
||||
//
|
||||
// The JSON unmarshaling must follow the JSON to proto specification:
|
||||
// https://developers.google.com/protocol-buffers/docs/proto3#json
|
||||
//
|
||||
// Deprecated: Custom types should implement protobuf reflection instead.
|
||||
type JSONPBUnmarshaler interface {
|
||||
UnmarshalJSONPB(*Unmarshaler, []byte) error
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals a JSON object from r into m.
|
||||
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
|
||||
return u.UnmarshalNext(json.NewDecoder(r), m)
|
||||
}
|
||||
|
||||
// UnmarshalNext unmarshals the next JSON object from d into m.
|
||||
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
|
||||
if m == nil {
|
||||
return errors.New("invalid nil message")
|
||||
}
|
||||
|
||||
// Parse the next JSON object from the stream.
|
||||
raw := json.RawMessage{}
|
||||
if err := d.Decode(&raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for custom unmarshalers first since they may not properly
|
||||
// implement protobuf reflection that the logic below relies on.
|
||||
if jsu, ok := m.(JSONPBUnmarshaler); ok {
|
||||
return jsu.UnmarshalJSONPB(u, raw)
|
||||
}
|
||||
|
||||
mr := proto.MessageReflect(m)
|
||||
|
||||
// NOTE: For historical reasons, a top-level null is treated as a noop.
|
||||
// This is incorrect, but kept for compatibility.
|
||||
if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if wrapJSONUnmarshalV2 {
|
||||
// NOTE: If input message is non-empty, we need to preserve merge semantics
|
||||
// of the old jsonpb implementation. These semantics are not supported by
|
||||
// the protobuf JSON specification.
|
||||
isEmpty := true
|
||||
mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
|
||||
isEmpty = false // at least one iteration implies non-empty
|
||||
return false
|
||||
})
|
||||
if !isEmpty {
|
||||
// Perform unmarshaling into a newly allocated, empty message.
|
||||
mr = mr.New()
|
||||
|
||||
// Use a defer to copy all unmarshaled fields into the original message.
|
||||
dst := proto.MessageReflect(m)
|
||||
defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
dst.Set(fd, v)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// Unmarshal using the v2 JSON unmarshaler.
|
||||
opts := protojson.UnmarshalOptions{
|
||||
DiscardUnknown: u.AllowUnknownFields,
|
||||
}
|
||||
if u.AnyResolver != nil {
|
||||
opts.Resolver = anyResolver{u.AnyResolver}
|
||||
}
|
||||
return opts.Unmarshal(raw, mr.Interface())
|
||||
} else {
|
||||
if err := u.unmarshalMessage(mr, raw); err != nil {
|
||||
return err
|
||||
}
|
||||
return protoV2.CheckInitialized(mr.Interface())
|
||||
}
|
||||
}
|
||||
|
||||
func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
|
||||
return jsu.UnmarshalJSONPB(u, in)
|
||||
}
|
||||
|
||||
if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch wellKnownType(md.FullName()) {
|
||||
case "Any":
|
||||
var jsonObject map[string]json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rawTypeURL, ok := jsonObject["@type"]
|
||||
if !ok {
|
||||
return errors.New("Any JSON doesn't have '@type'")
|
||||
}
|
||||
typeURL, err := unquoteString(string(rawTypeURL))
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
|
||||
}
|
||||
m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
|
||||
|
||||
var m2 protoreflect.Message
|
||||
if u.AnyResolver != nil {
|
||||
mi, err := u.AnyResolver.Resolve(typeURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m2 = proto.MessageReflect(mi)
|
||||
} else {
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
|
||||
if err != nil {
|
||||
if err == protoregistry.NotFound {
|
||||
return fmt.Errorf("could not resolve Any message type: %v", typeURL)
|
||||
}
|
||||
return err
|
||||
}
|
||||
m2 = mt.New()
|
||||
}
|
||||
|
||||
if wellKnownType(m2.Descriptor().FullName()) != "" {
|
||||
rawValue, ok := jsonObject["value"]
|
||||
if !ok {
|
||||
return errors.New("Any JSON doesn't have 'value'")
|
||||
}
|
||||
if err := u.unmarshalMessage(m2, rawValue); err != nil {
|
||||
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
|
||||
}
|
||||
} else {
|
||||
delete(jsonObject, "@type")
|
||||
rawJSON, err := json.Marshal(jsonObject)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
|
||||
}
|
||||
if err = u.unmarshalMessage(m2, rawJSON); err != nil {
|
||||
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
|
||||
}
|
||||
}
|
||||
|
||||
rawWire, err := protoV2.Marshal(m2.Interface())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
|
||||
}
|
||||
m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
|
||||
return nil
|
||||
case "BoolValue", "BytesValue", "StringValue",
|
||||
"Int32Value", "UInt32Value", "FloatValue",
|
||||
"Int64Value", "UInt64Value", "DoubleValue":
|
||||
fd := fds.ByNumber(1)
|
||||
v, err := u.unmarshalValue(m.NewField(fd), in, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
return nil
|
||||
case "Duration":
|
||||
v, err := unquoteString(string(in))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d, err := time.ParseDuration(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Duration: %v", err)
|
||||
}
|
||||
|
||||
sec := d.Nanoseconds() / 1e9
|
||||
nsec := d.Nanoseconds() % 1e9
|
||||
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
|
||||
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
|
||||
return nil
|
||||
case "Timestamp":
|
||||
v, err := unquoteString(string(in))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339Nano, v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Timestamp: %v", err)
|
||||
}
|
||||
|
||||
sec := t.Unix()
|
||||
nsec := t.Nanosecond()
|
||||
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
|
||||
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
|
||||
return nil
|
||||
case "Value":
|
||||
switch {
|
||||
case string(in) == "null":
|
||||
m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
|
||||
case string(in) == "true":
|
||||
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
|
||||
case string(in) == "false":
|
||||
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
|
||||
case hasPrefixAndSuffix('"', in, '"'):
|
||||
s, err := unquoteString(string(in))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unrecognized type for Value %q", in)
|
||||
}
|
||||
m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
|
||||
case hasPrefixAndSuffix('[', in, ']'):
|
||||
v := m.Mutable(fds.ByNumber(6))
|
||||
return u.unmarshalMessage(v.Message(), in)
|
||||
case hasPrefixAndSuffix('{', in, '}'):
|
||||
v := m.Mutable(fds.ByNumber(5))
|
||||
return u.unmarshalMessage(v.Message(), in)
|
||||
default:
|
||||
f, err := strconv.ParseFloat(string(in), 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unrecognized type for Value %q", in)
|
||||
}
|
||||
m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
|
||||
}
|
||||
return nil
|
||||
case "ListValue":
|
||||
var jsonArray []json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonArray); err != nil {
|
||||
return fmt.Errorf("bad ListValue: %v", err)
|
||||
}
|
||||
|
||||
lv := m.Mutable(fds.ByNumber(1)).List()
|
||||
for _, raw := range jsonArray {
|
||||
ve := lv.NewElement()
|
||||
if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
|
||||
return err
|
||||
}
|
||||
lv.Append(ve)
|
||||
}
|
||||
return nil
|
||||
case "Struct":
|
||||
var jsonObject map[string]json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||
return fmt.Errorf("bad StructValue: %v", err)
|
||||
}
|
||||
|
||||
mv := m.Mutable(fds.ByNumber(1)).Map()
|
||||
for key, raw := range jsonObject {
|
||||
kv := protoreflect.ValueOf(key).MapKey()
|
||||
vv := mv.NewValue()
|
||||
if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
|
||||
return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
|
||||
}
|
||||
mv.Set(kv, vv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var jsonObject map[string]json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle known fields.
|
||||
for i := 0; i < fds.Len(); i++ {
|
||||
fd := fds.Get(i)
|
||||
if fd.IsWeak() && fd.Message().IsPlaceholder() {
|
||||
continue // weak reference is not linked in
|
||||
}
|
||||
|
||||
// Search for any raw JSON value associated with this field.
|
||||
var raw json.RawMessage
|
||||
name := string(fd.Name())
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
name = string(fd.Message().Name())
|
||||
}
|
||||
if v, ok := jsonObject[name]; ok {
|
||||
delete(jsonObject, name)
|
||||
raw = v
|
||||
}
|
||||
name = string(fd.JSONName())
|
||||
if v, ok := jsonObject[name]; ok {
|
||||
delete(jsonObject, name)
|
||||
raw = v
|
||||
}
|
||||
|
||||
field := m.NewField(fd)
|
||||
// Unmarshal the field value.
|
||||
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
|
||||
continue
|
||||
}
|
||||
v, err := u.unmarshalValue(field, raw, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
}
|
||||
|
||||
// Handle extension fields.
|
||||
for name, raw := range jsonObject {
|
||||
if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Resolve the extension field by name.
|
||||
xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
|
||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||
if xt == nil && isMessageSet(md) {
|
||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||
}
|
||||
if xt == nil {
|
||||
continue
|
||||
}
|
||||
delete(jsonObject, name)
|
||||
fd := xt.TypeDescriptor()
|
||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||
return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
|
||||
}
|
||||
|
||||
field := m.NewField(fd)
|
||||
// Unmarshal the field value.
|
||||
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
|
||||
continue
|
||||
}
|
||||
v, err := u.unmarshalValue(field, raw, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
}
|
||||
|
||||
if !u.AllowUnknownFields && len(jsonObject) > 0 {
|
||||
for name := range jsonObject {
|
||||
return fmt.Errorf("unknown field %q in %v", name, md.FullName())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
|
||||
if md := fd.Message(); md != nil {
|
||||
return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
|
||||
if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
|
||||
_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
switch {
|
||||
case fd.IsList():
|
||||
var jsonArray []json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonArray); err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv := v.List()
|
||||
for _, raw := range jsonArray {
|
||||
ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(ve)
|
||||
}
|
||||
return v, nil
|
||||
case fd.IsMap():
|
||||
var jsonObject map[string]json.RawMessage
|
||||
if err := json.Unmarshal(in, &jsonObject); err != nil {
|
||||
return v, err
|
||||
}
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := v.Map()
|
||||
for key, raw := range jsonObject {
|
||||
var kv protoreflect.MapKey
|
||||
if kfd.Kind() == protoreflect.StringKind {
|
||||
kv = protoreflect.ValueOf(key).MapKey()
|
||||
} else {
|
||||
v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
kv = v.MapKey()
|
||||
}
|
||||
|
||||
vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
mv.Set(kv, vv)
|
||||
}
|
||||
return v, nil
|
||||
default:
|
||||
return u.unmarshalSingularValue(v, in, fd)
|
||||
}
|
||||
}
|
||||
|
||||
var nonFinite = map[string]float64{
|
||||
`"NaN"`: math.NaN(),
|
||||
`"Infinity"`: math.Inf(+1),
|
||||
`"-Infinity"`: math.Inf(-1),
|
||||
}
|
||||
|
||||
func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
switch fd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return unmarshalValue(in, new(bool))
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||
return unmarshalValue(trimQuote(in), new(int32))
|
||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return unmarshalValue(trimQuote(in), new(int64))
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||
return unmarshalValue(trimQuote(in), new(uint32))
|
||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return unmarshalValue(trimQuote(in), new(uint64))
|
||||
case protoreflect.FloatKind:
|
||||
if f, ok := nonFinite[string(in)]; ok {
|
||||
return protoreflect.ValueOfFloat32(float32(f)), nil
|
||||
}
|
||||
return unmarshalValue(trimQuote(in), new(float32))
|
||||
case protoreflect.DoubleKind:
|
||||
if f, ok := nonFinite[string(in)]; ok {
|
||||
return protoreflect.ValueOfFloat64(float64(f)), nil
|
||||
}
|
||||
return unmarshalValue(trimQuote(in), new(float64))
|
||||
case protoreflect.StringKind:
|
||||
return unmarshalValue(in, new(string))
|
||||
case protoreflect.BytesKind:
|
||||
return unmarshalValue(in, new([]byte))
|
||||
case protoreflect.EnumKind:
|
||||
if hasPrefixAndSuffix('"', in, '"') {
|
||||
vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
|
||||
if vd == nil {
|
||||
return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
|
||||
}
|
||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||
}
|
||||
return unmarshalValue(in, new(protoreflect.EnumNumber))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
err := u.unmarshalMessage(v.Message(), in)
|
||||
return v, err
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
|
||||
err := json.Unmarshal(in, v)
|
||||
return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
|
||||
}
|
||||
|
||||
func unquoteString(in string) (out string, err error) {
|
||||
err = json.Unmarshal([]byte(in), &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
|
||||
if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// trimQuote is like unquoteString but simply strips surrounding quotes.
|
||||
// This is incorrect, but it matches the behavior of the legacy implementation.
|
||||
func trimQuote(in []byte) []byte {
|
||||
if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
|
||||
in = in[1 : len(in)-1]
|
||||
}
|
||||
return in
|
||||
}
|
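For readers skimming this vendored decoder: the helpers above mean jsonpb tolerates string-quoted numbers (trimQuote) and enum values given either by name or by number. Below is a minimal, purely illustrative sketch of that behavior, assuming the well-known wrapper handling earlier in this file routes the raw JSON through unmarshalSingularValue; it uses the vendored (and deprecated) jsonpb API together with wrapperspb.

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Both forms should decode to 42: unmarshalSingularValue strips the
	// surrounding quotes from numeric kinds before calling json.Unmarshal.
	for _, in := range []string{`42`, `"42"`} {
		var v wrapperspb.Int32Value
		if err := jsonpb.UnmarshalString(in, &v); err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Println(v.GetValue())
	}
}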
559
vendor/github.com/golang/protobuf/jsonpb/encode.go
generated
vendored
Normal file
@ -0,0 +1,559 @@
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jsonpb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapJSONMarshalV2 = false
|
||||
|
||||
// Marshaler is a configurable object for marshaling protocol buffer messages
|
||||
// to the specified JSON representation.
|
||||
type Marshaler struct {
|
||||
// OrigName specifies whether to use the original protobuf name for fields.
|
||||
OrigName bool
|
||||
|
||||
// EnumsAsInts specifies whether to render enum values as integers,
|
||||
// as opposed to string values.
|
||||
EnumsAsInts bool
|
||||
|
||||
// EmitDefaults specifies whether to render fields with zero values.
|
||||
EmitDefaults bool
|
||||
|
||||
// Indent controls whether the output is compact or not.
|
||||
// If empty, the output is compact JSON. Otherwise, every JSON object
|
||||
// entry and JSON array value will be on its own line.
|
||||
// Each line will be preceded by repeated copies of Indent, where the
|
||||
// number of copies is the current indentation depth.
|
||||
Indent string
|
||||
|
||||
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
|
||||
// If unset, the global registry is used by default.
|
||||
AnyResolver AnyResolver
|
||||
}
|
||||
|
||||
// JSONPBMarshaler is implemented by protobuf messages that customize the
|
||||
// way they are marshaled to JSON. Messages that implement this should also
|
||||
// implement JSONPBUnmarshaler so that the custom format can be parsed.
|
||||
//
|
||||
// The JSON marshaling must follow the proto to JSON specification:
|
||||
// https://developers.google.com/protocol-buffers/docs/proto3#json
|
||||
//
|
||||
// Deprecated: Custom types should implement protobuf reflection instead.
|
||||
type JSONPBMarshaler interface {
|
||||
MarshalJSONPB(*Marshaler) ([]byte, error)
|
||||
}
|
||||
|
||||
// Marshal serializes a protobuf message as JSON into w.
|
||||
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
|
||||
b, err := jm.marshal(m)
|
||||
if len(b) > 0 {
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalToString serializes a protobuf message as JSON in string form.
|
||||
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
|
||||
b, err := jm.marshal(m)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
|
||||
v := reflect.ValueOf(m)
|
||||
if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
|
||||
return nil, errors.New("Marshal called with nil")
|
||||
}
|
||||
|
||||
// Check for custom marshalers first since they may not properly
|
||||
// implement protobuf reflection that the logic below relies on.
|
||||
if jsm, ok := m.(JSONPBMarshaler); ok {
|
||||
return jsm.MarshalJSONPB(jm)
|
||||
}
|
||||
|
||||
if wrapJSONMarshalV2 {
|
||||
opts := protojson.MarshalOptions{
|
||||
UseProtoNames: jm.OrigName,
|
||||
UseEnumNumbers: jm.EnumsAsInts,
|
||||
EmitUnpopulated: jm.EmitDefaults,
|
||||
Indent: jm.Indent,
|
||||
}
|
||||
if jm.AnyResolver != nil {
|
||||
opts.Resolver = anyResolver{jm.AnyResolver}
|
||||
}
|
||||
return opts.Marshal(proto.MessageReflect(m).Interface())
|
||||
} else {
|
||||
// Check for unpopulated required fields first.
|
||||
m2 := proto.MessageReflect(m)
|
||||
if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := jsonWriter{Marshaler: jm}
|
||||
err := w.marshalMessage(m2, "", "")
|
||||
return w.buf, err
|
||||
}
|
||||
}
|
||||
|
||||
type jsonWriter struct {
|
||||
*Marshaler
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *jsonWriter) write(s string) {
|
||||
w.buf = append(w.buf, s...)
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
|
||||
if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
|
||||
b, err := jsm.MarshalJSONPB(w.Marshaler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if typeURL != "" {
|
||||
// we are marshaling this object to an Any type
|
||||
var js map[string]*json.RawMessage
|
||||
if err = json.Unmarshal(b, &js); err != nil {
|
||||
return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
|
||||
}
|
||||
turl, err := json.Marshal(typeURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
|
||||
}
|
||||
js["@type"] = (*json.RawMessage)(&turl)
|
||||
if b, err = json.Marshal(js); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.write(string(b))
|
||||
return nil
|
||||
}
|
||||
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
// Handle well-known types.
|
||||
const secondInNanos = int64(time.Second / time.Nanosecond)
|
||||
switch wellKnownType(md.FullName()) {
|
||||
case "Any":
|
||||
return w.marshalAny(m, indent)
|
||||
case "BoolValue", "BytesValue", "StringValue",
|
||||
"Int32Value", "UInt32Value", "FloatValue",
|
||||
"Int64Value", "UInt64Value", "DoubleValue":
|
||||
fd := fds.ByNumber(1)
|
||||
return w.marshalValue(fd, m.Get(fd), indent)
|
||||
case "Duration":
|
||||
const maxSecondsInDuration = 315576000000
|
||||
// "Generated output always contains 0, 3, 6, or 9 fractional digits,
|
||||
// depending on required precision."
|
||||
s := m.Get(fds.ByNumber(1)).Int()
|
||||
ns := m.Get(fds.ByNumber(2)).Int()
|
||||
if s < -maxSecondsInDuration || s > maxSecondsInDuration {
|
||||
return fmt.Errorf("seconds out of range %v", s)
|
||||
}
|
||||
if ns <= -secondInNanos || ns >= secondInNanos {
|
||||
return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
|
||||
}
|
||||
if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
|
||||
return errors.New("signs of seconds and nanos do not match")
|
||||
}
|
||||
var sign string
|
||||
if s < 0 || ns < 0 {
|
||||
sign, s, ns = "-", -1*s, -1*ns
|
||||
}
|
||||
x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
|
||||
x = strings.TrimSuffix(x, "000")
|
||||
x = strings.TrimSuffix(x, "000")
|
||||
x = strings.TrimSuffix(x, ".000")
|
||||
w.write(fmt.Sprintf(`"%vs"`, x))
|
||||
return nil
|
||||
case "Timestamp":
|
||||
// "RFC 3339, where generated output will always be Z-normalized
|
||||
// and uses 0, 3, 6 or 9 fractional digits."
|
||||
s := m.Get(fds.ByNumber(1)).Int()
|
||||
ns := m.Get(fds.ByNumber(2)).Int()
|
||||
if ns < 0 || ns >= secondInNanos {
|
||||
return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
|
||||
}
|
||||
t := time.Unix(s, ns).UTC()
|
||||
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
|
||||
x := t.Format("2006-01-02T15:04:05.000000000")
|
||||
x = strings.TrimSuffix(x, "000")
|
||||
x = strings.TrimSuffix(x, "000")
|
||||
x = strings.TrimSuffix(x, ".000")
|
||||
w.write(fmt.Sprintf(`"%vZ"`, x))
|
||||
return nil
|
||||
case "Value":
|
||||
// JSON value; which is a null, number, string, bool, object, or array.
|
||||
od := md.Oneofs().Get(0)
|
||||
fd := m.WhichOneof(od)
|
||||
if fd == nil {
|
||||
return errors.New("nil Value")
|
||||
}
|
||||
return w.marshalValue(fd, m.Get(fd), indent)
|
||||
case "Struct", "ListValue":
|
||||
// JSON object or array.
|
||||
fd := fds.ByNumber(1)
|
||||
return w.marshalValue(fd, m.Get(fd), indent)
|
||||
}
|
||||
|
||||
w.write("{")
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
}
|
||||
|
||||
firstField := true
|
||||
if typeURL != "" {
|
||||
if err := w.marshalTypeURL(indent, typeURL); err != nil {
|
||||
return err
|
||||
}
|
||||
firstField = false
|
||||
}
|
||||
|
||||
for i := 0; i < fds.Len(); {
|
||||
fd := fds.Get(i)
|
||||
if od := fd.ContainingOneof(); od != nil {
|
||||
fd = m.WhichOneof(od)
|
||||
i += od.Fields().Len()
|
||||
if fd == nil {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
|
||||
v := m.Get(fd)
|
||||
|
||||
if !m.Has(fd) {
|
||||
if !w.EmitDefaults || fd.ContainingOneof() != nil {
|
||||
continue
|
||||
}
|
||||
if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
|
||||
v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
|
||||
}
|
||||
}
|
||||
|
||||
if !firstField {
|
||||
w.writeComma()
|
||||
}
|
||||
if err := w.marshalField(fd, v, indent); err != nil {
|
||||
return err
|
||||
}
|
||||
firstField = false
|
||||
}
|
||||
|
||||
// Handle proto2 extensions.
|
||||
if md.ExtensionRanges().Len() > 0 {
|
||||
// Collect a sorted list of all extension descriptor and values.
|
||||
type ext struct {
|
||||
desc protoreflect.FieldDescriptor
|
||||
val protoreflect.Value
|
||||
}
|
||||
var exts []ext
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
exts = append(exts, ext{fd, v})
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(exts, func(i, j int) bool {
|
||||
return exts[i].desc.Number() < exts[j].desc.Number()
|
||||
})
|
||||
|
||||
for _, ext := range exts {
|
||||
if !firstField {
|
||||
w.writeComma()
|
||||
}
|
||||
if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
|
||||
return err
|
||||
}
|
||||
firstField = false
|
||||
}
|
||||
}
|
||||
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
}
|
||||
w.write("}")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *jsonWriter) writeComma() {
|
||||
if w.Indent != "" {
|
||||
w.write(",\n")
|
||||
} else {
|
||||
w.write(",")
|
||||
}
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
|
||||
// "If the Any contains a value that has a special JSON mapping,
|
||||
// it will be converted as follows: {"@type": xxx, "value": yyy}.
|
||||
// Otherwise, the value will be converted into a JSON object,
|
||||
// and the "@type" field will be inserted to indicate the actual data type."
|
||||
md := m.Descriptor()
|
||||
typeURL := m.Get(md.Fields().ByNumber(1)).String()
|
||||
rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
|
||||
|
||||
var m2 protoreflect.Message
|
||||
if w.AnyResolver != nil {
|
||||
mi, err := w.AnyResolver.Resolve(typeURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m2 = proto.MessageReflect(mi)
|
||||
} else {
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m2 = mt.New()
|
||||
}
|
||||
|
||||
if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if wellKnownType(m2.Descriptor().FullName()) == "" {
|
||||
return w.marshalMessage(m2, indent, typeURL)
|
||||
}
|
||||
|
||||
w.write("{")
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
}
|
||||
if err := w.marshalTypeURL(indent, typeURL); err != nil {
|
||||
return err
|
||||
}
|
||||
w.writeComma()
|
||||
if w.Indent != "" {
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
w.write(`"value": `)
|
||||
} else {
|
||||
w.write(`"value":`)
|
||||
}
|
||||
if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
}
|
||||
w.write("}")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
|
||||
if w.Indent != "" {
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
w.write(`"@type":`)
|
||||
if w.Indent != "" {
|
||||
w.write(" ")
|
||||
}
|
||||
b, err := json.Marshal(typeURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.write(string(b))
|
||||
return nil
|
||||
}
|
||||
|
||||
// marshalField writes field description and value to the Writer.
|
||||
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||
if w.Indent != "" {
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
w.write(`"`)
|
||||
switch {
|
||||
case fd.IsExtension():
|
||||
// For message set, use the name of the message as the extension name.
|
||||
name := string(fd.FullName())
|
||||
if isMessageSet(fd.ContainingMessage()) {
|
||||
name = strings.TrimSuffix(name, ".message_set_extension")
|
||||
}
|
||||
|
||||
w.write("[" + name + "]")
|
||||
case w.OrigName:
|
||||
name := string(fd.Name())
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
name = string(fd.Message().Name())
|
||||
}
|
||||
w.write(name)
|
||||
default:
|
||||
w.write(string(fd.JSONName()))
|
||||
}
|
||||
w.write(`":`)
|
||||
if w.Indent != "" {
|
||||
w.write(" ")
|
||||
}
|
||||
return w.marshalValue(fd, v, indent)
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||
switch {
|
||||
case fd.IsList():
|
||||
w.write("[")
|
||||
comma := ""
|
||||
lv := v.List()
|
||||
for i := 0; i < lv.Len(); i++ {
|
||||
w.write(comma)
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
|
||||
return err
|
||||
}
|
||||
comma = ","
|
||||
}
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
w.write("]")
|
||||
return nil
|
||||
case fd.IsMap():
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := v.Map()
|
||||
|
||||
// Collect a sorted list of all map keys and values.
|
||||
type entry struct{ key, val protoreflect.Value }
|
||||
var entries []entry
|
||||
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
entries = append(entries, entry{k.Value(), v})
|
||||
return true
|
||||
})
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
switch kfd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return !entries[i].key.Bool() && entries[j].key.Bool()
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return entries[i].key.Int() < entries[j].key.Int()
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return entries[i].key.Uint() < entries[j].key.Uint()
|
||||
case protoreflect.StringKind:
|
||||
return entries[i].key.String() < entries[j].key.String()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
})
|
||||
|
||||
w.write(`{`)
|
||||
comma := ""
|
||||
for _, entry := range entries {
|
||||
w.write(comma)
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
|
||||
s := fmt.Sprint(entry.key.Interface())
|
||||
b, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.write(string(b))
|
||||
|
||||
w.write(`:`)
|
||||
if w.Indent != "" {
|
||||
w.write(` `)
|
||||
}
|
||||
|
||||
if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
|
||||
return err
|
||||
}
|
||||
comma = ","
|
||||
}
|
||||
if w.Indent != "" {
|
||||
w.write("\n")
|
||||
w.write(indent)
|
||||
w.write(w.Indent)
|
||||
}
|
||||
w.write(`}`)
|
||||
return nil
|
||||
default:
|
||||
return w.marshalSingularValue(fd, v, indent)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
|
||||
switch {
|
||||
case !v.IsValid():
|
||||
w.write("null")
|
||||
return nil
|
||||
case fd.Message() != nil:
|
||||
return w.marshalMessage(v.Message(), indent+w.Indent, "")
|
||||
case fd.Enum() != nil:
|
||||
if fd.Enum().FullName() == "google.protobuf.NullValue" {
|
||||
w.write("null")
|
||||
return nil
|
||||
}
|
||||
|
||||
vd := fd.Enum().Values().ByNumber(v.Enum())
|
||||
if vd == nil || w.EnumsAsInts {
|
||||
w.write(strconv.Itoa(int(v.Enum())))
|
||||
} else {
|
||||
w.write(`"` + string(vd.Name()) + `"`)
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
switch v.Interface().(type) {
|
||||
case float32, float64:
|
||||
switch {
|
||||
case math.IsInf(v.Float(), +1):
|
||||
w.write(`"Infinity"`)
|
||||
return nil
|
||||
case math.IsInf(v.Float(), -1):
|
||||
w.write(`"-Infinity"`)
|
||||
return nil
|
||||
case math.IsNaN(v.Float()):
|
||||
w.write(`"NaN"`)
|
||||
return nil
|
||||
}
|
||||
case int64, uint64:
|
||||
w.write(fmt.Sprintf(`"%d"`, v.Interface()))
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := json.Marshal(v.Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.write(string(b))
|
||||
return nil
|
||||
}
|
||||
}
|
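To make the Duration branch above concrete: a Duration marshals to a quoted string with 0, 3, 6, or 9 fractional digits rather than to a JSON object. A minimal sketch, assuming the vendored durationpb package is available (jsonpb itself is deprecated in favor of protojson).

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := &jsonpb.Marshaler{}
	// 90.5s has millisecond precision, so marshalMessage trims the trailing
	// zeros down to three fractional digits and prints "90.500s".
	s, err := m.MarshalToString(durationpb.New(90*time.Second + 500*time.Millisecond))
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}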
69
vendor/github.com/golang/protobuf/jsonpb/json.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package jsonpb provides functionality to marshal and unmarshal between a
|
||||
// protocol buffer message and JSON. It follows the specification at
|
||||
// https://developers.google.com/protocol-buffers/docs/proto3#json.
|
||||
//
|
||||
// Do not rely on the default behavior of the standard encoding/json package
|
||||
// when called on generated message types as it does not operate correctly.
|
||||
//
|
||||
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
|
||||
// package instead.
|
||||
package jsonpb
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// AnyResolver takes a type URL, present in an Any message,
|
||||
// and resolves it into an instance of the associated message.
|
||||
type AnyResolver interface {
|
||||
Resolve(typeURL string) (proto.Message, error)
|
||||
}
|
||||
|
||||
type anyResolver struct{ AnyResolver }
|
||||
|
||||
func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
|
||||
return r.FindMessageByURL(string(message))
|
||||
}
|
||||
|
||||
func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
|
||||
m, err := r.Resolve(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return protoimpl.X.MessageTypeOf(m), nil
|
||||
}
|
||||
|
||||
func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
|
||||
return protoregistry.GlobalTypes.FindExtensionByName(field)
|
||||
}
|
||||
|
||||
func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
|
||||
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
|
||||
}
|
||||
|
||||
func wellKnownType(s protoreflect.FullName) string {
|
||||
if s.Parent() == "google.protobuf" {
|
||||
switch s.Name() {
|
||||
case "Empty", "Any",
|
||||
"BoolValue", "BytesValue", "StringValue",
|
||||
"Int32Value", "UInt32Value", "FloatValue",
|
||||
"Int64Value", "UInt64Value", "DoubleValue",
|
||||
"Duration", "Timestamp",
|
||||
"NullValue", "Struct", "Value", "ListValue":
|
||||
return string(s.Name())
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
||||
ms, ok := md.(interface{ IsMessageSet() bool })
|
||||
return ok && ms.IsMessageSet()
|
||||
}
|
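AnyResolver is the hook both Marshaler and Unmarshaler consult when they meet a google.protobuf.Any; the anyResolver adapter above bridges it to the protoregistry interfaces. A hedged sketch of a custom resolver backed by a fixed table follows; staticResolver and its contents are hypothetical, and most code can simply rely on the default global registry.

package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

// staticResolver maps the last path element of a type URL to a prototype message.
type staticResolver map[string]proto.Message

func (r staticResolver) Resolve(typeURL string) (proto.Message, error) {
	name := typeURL
	if i := strings.LastIndex(name, "/"); i >= 0 {
		name = name[i+1:]
	}
	if m, ok := r[name]; ok {
		return proto.Clone(m), nil // hand back a fresh copy for the caller to fill in
	}
	return nil, fmt.Errorf("unknown type URL %q", typeURL)
}

func main() {
	res := staticResolver{"google.protobuf.Duration": &durationpb.Duration{}}
	m, err := res.Resolve("type.googleapis.com/google.protobuf.Duration")
	fmt.Println(m, err)
	_ = jsonpb.Marshaler{AnyResolver: res} // wiring it into marshalAny/unmarshalAny
}

Passing res as Marshaler.AnyResolver or Unmarshaler.AnyResolver routes Any payloads through this table instead of the global registry.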
27
vendor/github.com/google/go-cmp/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
669
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
Normal file
@ -0,0 +1,669 @@
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cmp determines equality of values.
|
||||
//
|
||||
// This package is intended to be a more powerful and safer alternative to
|
||||
// reflect.DeepEqual for comparing whether two values are semantically equal.
|
||||
// It is intended to only be used in tests, as performance is not a goal and
|
||||
// it may panic if it cannot compare the values. Its propensity towards
|
||||
// panicking means that it is unsuitable for production environments where a
|
||||
// spurious panic may be fatal.
|
||||
//
|
||||
// The primary features of cmp are:
|
||||
//
|
||||
// - When the default behavior of equality does not suit the test's needs,
|
||||
// custom equality functions can override the equality operation.
|
||||
// For example, an equality function may report floats as equal so long as
|
||||
// they are within some tolerance of each other.
|
||||
//
|
||||
// - Types with an Equal method may use that method to determine equality.
|
||||
// This allows package authors to determine the equality operation
|
||||
// for the types that they define.
|
||||
//
|
||||
// - If no custom equality functions are used and no Equal method is defined,
|
||||
// equality is determined by recursively comparing the primitive kinds on
|
||||
// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
|
||||
// unexported fields are not compared by default; they result in panics
|
||||
// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
|
||||
// or explicitly compared using the Exporter option.
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// TODO(≥go1.18): Use any instead of interface{}.
|
||||
|
||||
// Equal reports whether x and y are equal by recursively applying the
|
||||
// following rules in the given order to x and y and all of their sub-values:
|
||||
//
|
||||
// - Let S be the set of all Ignore, Transformer, and Comparer options that
|
||||
// remain after applying all path filters, value filters, and type filters.
|
||||
// If at least one Ignore exists in S, then the comparison is ignored.
|
||||
// If the number of Transformer and Comparer options in S is non-zero,
|
||||
// then Equal panics because it is ambiguous which option to use.
|
||||
// If S contains a single Transformer, then use that to transform
|
||||
// the current values and recursively call Equal on the output values.
|
||||
// If S contains a single Comparer, then use that to compare the current values.
|
||||
// Otherwise, evaluation proceeds to the next rule.
|
||||
//
|
||||
// - If the values have an Equal method of the form "(T) Equal(T) bool" or
|
||||
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
|
||||
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
|
||||
// evaluation proceeds to the next rule.
|
||||
//
|
||||
// - Lastly, try to compare x and y based on their basic kinds.
|
||||
// Simple kinds like booleans, integers, floats, complex numbers, strings,
|
||||
// and channels are compared using the equivalent of the == operator in Go.
|
||||
// Functions are only equal if they are both nil, otherwise they are unequal.
|
||||
//
|
||||
// Structs are equal if recursively calling Equal on all fields report equal.
|
||||
// If a struct contains unexported fields, Equal panics unless an Ignore option
|
||||
// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option
|
||||
// explicitly permits comparing the unexported field.
|
||||
//
|
||||
// Slices are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored slice or array elements report equal.
|
||||
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
|
||||
// consider using cmpopts.EquateEmpty.
|
||||
//
|
||||
// Maps are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored map entries report equal.
|
||||
// Map keys are equal according to the == operator.
|
||||
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
|
||||
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
|
||||
// consider using cmpopts.EquateEmpty.
|
||||
//
|
||||
// Pointers and interfaces are equal if they are both nil or both non-nil,
|
||||
// where they have the same underlying concrete type and recursively
|
||||
// calling Equal on the underlying values reports equal.
|
||||
//
|
||||
// Before recursing into a pointer, slice element, or map, the current path
|
||||
// is checked to detect whether the address has already been visited.
|
||||
// If there is a cycle, then the pointed at values are considered equal
|
||||
// only if both addresses were previously visited in the same path step.
|
||||
func Equal(x, y interface{}, opts ...Option) bool {
|
||||
s := newState(opts)
|
||||
s.compareAny(rootStep(x, y))
|
||||
return s.result.Equal()
|
||||
}
|
||||
|
||||
// Diff returns a human-readable report of the differences between two values:
|
||||
// y - x. It returns an empty string if and only if Equal returns true for the
|
||||
// same input values and options.
|
||||
//
|
||||
// The output is displayed as a literal in pseudo-Go syntax.
|
||||
// At the start of each line, a "-" prefix indicates an element removed from x,
|
||||
// a "+" prefix to indicates an element added from y, and the lack of a prefix
|
||||
// indicates an element common to both x and y. If possible, the output
|
||||
// uses fmt.Stringer.String or error.Error methods to produce more humanly
|
||||
// readable outputs. In such cases, the string is prefixed with either an
|
||||
// 's' or 'e' character, respectively, to indicate that the method was called.
|
||||
//
|
||||
// Do not depend on this output being stable. If you need the ability to
|
||||
// programmatically interpret the difference, consider using a custom Reporter.
|
||||
func Diff(x, y interface{}, opts ...Option) string {
|
||||
s := newState(opts)
|
||||
|
||||
// Optimization: If there are no other reporters, we can optimize for the
|
||||
// common case where the result is equal (and thus no reported difference).
|
||||
// This avoids the expensive construction of a difference tree.
|
||||
if len(s.reporters) == 0 {
|
||||
s.compareAny(rootStep(x, y))
|
||||
if s.result.Equal() {
|
||||
return ""
|
||||
}
|
||||
s.result = diff.Result{} // Reset results
|
||||
}
|
||||
|
||||
r := new(defaultReporter)
|
||||
s.reporters = append(s.reporters, reporter{r})
|
||||
s.compareAny(rootStep(x, y))
|
||||
d := r.String()
|
||||
if (d == "") != s.result.Equal() {
|
||||
panic("inconsistent difference and equality results")
|
||||
}
|
||||
return d
|
||||
}
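The rules documented on Equal are easiest to see in a small example. This is a minimal sketch: the point type and the 1e-3 tolerance are made up, and the hand-rolled Comparer stands in for the cmpopts helpers that live outside this file.

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

type point struct{ X, Y float64 }

func main() {
	got := []point{{1, 2}, {3, 4.00001}}
	want := []point{{1, 2}, {3, 4}}

	// Rule 3: recursive comparison of the exported fields reports a difference.
	fmt.Println(cmp.Equal(got, want)) // false

	// Rule 1: a Comparer filtered to float64 overrides that comparison.
	approx := cmp.Comparer(func(a, b float64) bool { return math.Abs(a-b) <= 1e-3 })
	fmt.Println(cmp.Equal(got, want, approx)) // true

	// Diff renders the differences in pseudo-Go syntax; an empty string means equal.
	fmt.Println(cmp.Diff(want, got))
}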
|
||||
|
||||
// rootStep constructs the first path step. If x and y have differing types,
|
||||
// then they are stored within an empty interface type.
|
||||
func rootStep(x, y interface{}) PathStep {
|
||||
vx := reflect.ValueOf(x)
|
||||
vy := reflect.ValueOf(y)
|
||||
|
||||
// If the inputs are different types, auto-wrap them in an empty interface
|
||||
// so that they have the same parent type.
|
||||
var t reflect.Type
|
||||
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
|
||||
t = anyType
|
||||
if vx.IsValid() {
|
||||
vvx := reflect.New(t).Elem()
|
||||
vvx.Set(vx)
|
||||
vx = vvx
|
||||
}
|
||||
if vy.IsValid() {
|
||||
vvy := reflect.New(t).Elem()
|
||||
vvy.Set(vy)
|
||||
vy = vvy
|
||||
}
|
||||
} else {
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
return &pathStep{t, vx, vy}
|
||||
}
|
||||
|
||||
type state struct {
|
||||
// These fields represent the "comparison state".
|
||||
// Calling statelessCompare must not result in observable changes to these.
|
||||
result diff.Result // The current result of comparison
|
||||
curPath Path // The current path in the value tree
|
||||
curPtrs pointerPath // The current set of visited pointers
|
||||
reporters []reporter // Optional reporters
|
||||
|
||||
// recChecker checks for infinite cycles applying the same set of
|
||||
// transformers upon the output of itself.
|
||||
recChecker recChecker
|
||||
|
||||
// dynChecker triggers pseudo-random checks for option correctness.
|
||||
// It is safe for statelessCompare to mutate this value.
|
||||
dynChecker dynChecker
|
||||
|
||||
// These fields, once set by processOption, will not change.
|
||||
exporters []exporter // List of exporters for structs with unexported fields
|
||||
opts Options // List of all fundamental and filter options
|
||||
}
|
||||
|
||||
func newState(opts []Option) *state {
|
||||
// Always ensure a validator option exists to validate the inputs.
|
||||
s := &state{opts: Options{validator{}}}
|
||||
s.curPtrs.Init()
|
||||
s.processOption(Options(opts))
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *state) processOption(opt Option) {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
case Options:
|
||||
for _, o := range opt {
|
||||
s.processOption(o)
|
||||
}
|
||||
case coreOption:
|
||||
type filtered interface {
|
||||
isFiltered() bool
|
||||
}
|
||||
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
|
||||
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
|
||||
}
|
||||
s.opts = append(s.opts, opt)
|
||||
case exporter:
|
||||
s.exporters = append(s.exporters, opt)
|
||||
case reporter:
|
||||
s.reporters = append(s.reporters, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown option %T", opt))
|
||||
}
|
||||
}
|
||||
|
||||
// statelessCompare compares two values and returns the result.
|
||||
// This function is stateless in that it does not alter the current result,
|
||||
// or output to any registered reporters.
|
||||
func (s *state) statelessCompare(step PathStep) diff.Result {
|
||||
// We do not save and restore curPath and curPtrs because all of the
|
||||
// compareX methods should properly push and pop from them.
|
||||
// It is an implementation bug if the contents of the paths differ from
|
||||
// when calling this function to when returning from it.
|
||||
|
||||
oldResult, oldReporters := s.result, s.reporters
|
||||
s.result = diff.Result{} // Reset result
|
||||
s.reporters = nil // Remove reporters to avoid spurious printouts
|
||||
s.compareAny(step)
|
||||
res := s.result
|
||||
s.result, s.reporters = oldResult, oldReporters
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *state) compareAny(step PathStep) {
|
||||
// Update the path stack.
|
||||
s.curPath.push(step)
|
||||
defer s.curPath.pop()
|
||||
for _, r := range s.reporters {
|
||||
r.PushStep(step)
|
||||
defer r.PopStep()
|
||||
}
|
||||
s.recChecker.Check(s.curPath)
|
||||
|
||||
// Cycle-detection for slice elements (see NOTE in compareSlice).
|
||||
t := step.Type()
|
||||
vx, vy := step.Values()
|
||||
if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
|
||||
px, py := vx.Addr(), vy.Addr()
|
||||
if eq, visited := s.curPtrs.Push(px, py); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(px, py)
|
||||
}
|
||||
|
||||
// Rule 1: Check whether an option applies on this node in the value tree.
|
||||
if s.tryOptions(t, vx, vy) {
|
||||
return
|
||||
}
|
||||
|
||||
// Rule 2: Check whether the type has a valid Equal method.
|
||||
if s.tryMethod(t, vx, vy) {
|
||||
return
|
||||
}
|
||||
|
||||
// Rule 3: Compare based on the underlying kind.
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
s.report(vx.Bool() == vy.Bool(), 0)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
s.report(vx.Int() == vy.Int(), 0)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
s.report(vx.Uint() == vy.Uint(), 0)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
s.report(vx.Float() == vy.Float(), 0)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
s.report(vx.Complex() == vy.Complex(), 0)
|
||||
case reflect.String:
|
||||
s.report(vx.String() == vy.String(), 0)
|
||||
case reflect.Chan, reflect.UnsafePointer:
|
||||
s.report(vx.Pointer() == vy.Pointer(), 0)
|
||||
case reflect.Func:
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
case reflect.Struct:
|
||||
s.compareStruct(t, vx, vy)
|
||||
case reflect.Slice, reflect.Array:
|
||||
s.compareSlice(t, vx, vy)
|
||||
case reflect.Map:
|
||||
s.compareMap(t, vx, vy)
|
||||
case reflect.Ptr:
|
||||
s.comparePtr(t, vx, vy)
|
||||
case reflect.Interface:
|
||||
s.compareInterface(t, vx, vy)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
// Evaluate all filters and apply the remaining options.
|
||||
if opt := s.opts.filter(s, t, vx, vy); opt != nil {
|
||||
opt.apply(s, vx, vy)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
// Check if this type even has an Equal method.
|
||||
m, ok := t.MethodByName("Equal")
|
||||
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
|
||||
return false
|
||||
}
|
||||
|
||||
eq := s.callTTBFunc(m.Func, vx, vy)
|
||||
s.report(eq, reportByMethod)
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{v})[0]
|
||||
}
|
||||
|
||||
// Run the function twice and ensure that we get the same results back.
|
||||
// We run in goroutines so that the race detector (if enabled) can detect
|
||||
// unsafe mutations to the input.
|
||||
c := make(chan reflect.Value)
|
||||
go detectRaces(c, f, v)
|
||||
got := <-c
|
||||
want := f.Call([]reflect.Value{v})[0]
|
||||
if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
|
||||
// To avoid false-positives with non-reflexive equality operations,
|
||||
// we sanity check whether a value is equal to itself.
|
||||
if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
|
||||
return want
|
||||
}
|
||||
panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
|
||||
}
|
||||
return want
|
||||
}
|
||||
|
||||
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
}
|
||||
|
||||
// Swapping the input arguments is sufficient to check that
|
||||
// f is symmetric and deterministic.
|
||||
// We run in goroutines so that the race detector (if enabled) can detect
|
||||
// unsafe mutations to the input.
|
||||
c := make(chan reflect.Value)
|
||||
go detectRaces(c, f, y, x)
|
||||
got := <-c
|
||||
want := f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
if !got.IsValid() || got.Bool() != want {
|
||||
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
|
||||
}
|
||||
return want
|
||||
}
|
||||
|
||||
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
|
||||
var ret reflect.Value
|
||||
defer func() {
|
||||
recover() // Ignore panics, let the other call to f panic instead
|
||||
c <- ret
|
||||
}()
|
||||
ret = f.Call(vs)[0]
|
||||
}
|
||||
|
||||
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
|
||||
var addr bool
|
||||
var vax, vay reflect.Value // Addressable versions of vx and vy
|
||||
|
||||
var mayForce, mayForceInit bool
|
||||
step := StructField{&structField{}}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
step.typ = t.Field(i).Type
|
||||
step.vx = vx.Field(i)
|
||||
step.vy = vy.Field(i)
|
||||
step.name = t.Field(i).Name
|
||||
step.idx = i
|
||||
step.unexported = !isExported(step.name)
|
||||
if step.unexported {
|
||||
if step.name == "_" {
|
||||
continue
|
||||
}
|
||||
// Defer checking of unexported fields until later to give an
|
||||
// Ignore a chance to ignore the field.
|
||||
if !vax.IsValid() || !vay.IsValid() {
|
||||
// For retrieveUnexportedField to work, the parent struct must
|
||||
// be addressable. Create a new copy of the values if
|
||||
// necessary to make them addressable.
|
||||
addr = vx.CanAddr() || vy.CanAddr()
|
||||
vax = makeAddressable(vx)
|
||||
vay = makeAddressable(vy)
|
||||
}
|
||||
if !mayForceInit {
|
||||
for _, xf := range s.exporters {
|
||||
mayForce = mayForce || xf(t)
|
||||
}
|
||||
mayForceInit = true
|
||||
}
|
||||
step.mayForce = mayForce
|
||||
step.paddr = addr
|
||||
step.pvx = vax
|
||||
step.pvy = vay
|
||||
step.field = t.Field(i)
|
||||
}
|
||||
s.compareAny(step)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
|
||||
isSlice := t.Kind() == reflect.Slice
|
||||
if isSlice && (vx.IsNil() || vy.IsNil()) {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
|
||||
// since a slice represents a list of pointers, rather than a single pointer.
|
||||
// The pointer checking logic must be handled on a per-element basis
|
||||
// in compareAny.
|
||||
//
|
||||
// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
|
||||
// pointer P, a length N, and a capacity C. Supposing each slice element has
|
||||
// a memory size of M, then the slice is equivalent to the list of pointers:
|
||||
// [P+i*M for i in range(N)]
|
||||
//
|
||||
// For example, v[:0] and v[:1] are slices with the same starting pointer,
|
||||
// but they are clearly different values. Using the slice pointer alone
|
||||
// violates the assumption that equal pointers implies equal values.
|
||||
|
||||
step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
|
||||
withIndexes := func(ix, iy int) SliceIndex {
|
||||
if ix >= 0 {
|
||||
step.vx, step.xkey = vx.Index(ix), ix
|
||||
} else {
|
||||
step.vx, step.xkey = reflect.Value{}, -1
|
||||
}
|
||||
if iy >= 0 {
|
||||
step.vy, step.ykey = vy.Index(iy), iy
|
||||
} else {
|
||||
step.vy, step.ykey = reflect.Value{}, -1
|
||||
}
|
||||
return step
|
||||
}
|
||||
|
||||
// Ignore options are able to ignore missing elements in a slice.
|
||||
// However, detecting these reliably requires an optimal differencing
|
||||
// algorithm, which diff.Difference is not.
|
||||
//
|
||||
// Instead, we first iterate through both slices to detect which elements
|
||||
// would be ignored if standing alone. The index of non-discarded elements
|
||||
// are stored in a separate slice, which diffing is then performed on.
|
||||
var indexesX, indexesY []int
|
||||
var ignoredX, ignoredY []bool
|
||||
for ix := 0; ix < vx.Len(); ix++ {
|
||||
ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
|
||||
if !ignored {
|
||||
indexesX = append(indexesX, ix)
|
||||
}
|
||||
ignoredX = append(ignoredX, ignored)
|
||||
}
|
||||
for iy := 0; iy < vy.Len(); iy++ {
|
||||
ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
|
||||
if !ignored {
|
||||
indexesY = append(indexesY, iy)
|
||||
}
|
||||
ignoredY = append(ignoredY, ignored)
|
||||
}
|
||||
|
||||
// Compute an edit-script for slices vx and vy (excluding ignored elements).
|
||||
edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
|
||||
return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
|
||||
})
|
||||
|
||||
// Replay the ignore-scripts and the edit-script.
|
||||
var ix, iy int
|
||||
for ix < vx.Len() || iy < vy.Len() {
|
||||
var e diff.EditType
|
||||
switch {
|
||||
case ix < len(ignoredX) && ignoredX[ix]:
|
||||
e = diff.UniqueX
|
||||
case iy < len(ignoredY) && ignoredY[iy]:
|
||||
e = diff.UniqueY
|
||||
default:
|
||||
e, edits = edits[0], edits[1:]
|
||||
}
|
||||
switch e {
|
||||
case diff.UniqueX:
|
||||
s.compareAny(withIndexes(ix, -1))
|
||||
ix++
|
||||
case diff.UniqueY:
|
||||
s.compareAny(withIndexes(-1, iy))
|
||||
iy++
|
||||
default:
|
||||
s.compareAny(withIndexes(ix, iy))
|
||||
ix++
|
||||
iy++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle-detection for maps.
|
||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(vx, vy)
|
||||
|
||||
// We combine and sort the two map keys so that we can perform the
|
||||
// comparisons in a deterministic order.
|
||||
step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
|
||||
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
|
||||
step.vx = vx.MapIndex(k)
|
||||
step.vy = vy.MapIndex(k)
|
||||
step.key = k
|
||||
if !step.vx.IsValid() && !step.vy.IsValid() {
|
||||
// It is possible for both vx and vy to be invalid if the
|
||||
// key contained a NaN value in it.
|
||||
//
|
||||
// Even with the ability to retrieve NaN keys in Go 1.12,
|
||||
// there still isn't a sensible way to compare the values since
|
||||
// a NaN key may map to multiple unordered values.
|
||||
// The most reasonable way to compare NaNs would be to compare the
|
||||
// set of values. However, this is impossible to do efficiently
|
||||
// since set equality is provably an O(n^2) operation given only
|
||||
// an Equal function. If we had a Less function or Hash function,
|
||||
// this could be done in O(n*log(n)) or O(n), respectively.
|
||||
//
|
||||
// Rather than adding complex logic to deal with NaNs, make it
|
||||
// the user's responsibility to compare such obscure maps.
|
||||
const help = "consider providing a Comparer to compare the map"
|
||||
panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
|
||||
}
|
||||
s.compareAny(step)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle-detection for pointers.
|
||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(vx, vy)
|
||||
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
|
||||
}
|
||||
|
||||
func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
if vx.Type() != vy.Type() {
|
||||
s.report(false, 0)
|
||||
return
|
||||
}
|
||||
s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
|
||||
}
|
||||
|
||||
func (s *state) report(eq bool, rf resultFlags) {
|
||||
if rf&reportByIgnore == 0 {
|
||||
if eq {
|
||||
s.result.NumSame++
|
||||
rf |= reportEqual
|
||||
} else {
|
||||
s.result.NumDiff++
|
||||
rf |= reportUnequal
|
||||
}
|
||||
}
|
||||
for _, r := range s.reporters {
|
||||
r.Report(Result{flags: rf})
|
||||
}
|
||||
}
|
||||
|
||||
// recChecker tracks the state needed to periodically perform checks that
|
||||
// user provided transformers are not stuck in an infinitely recursive cycle.
|
||||
type recChecker struct{ next int }
|
||||
|
||||
// Check scans the Path for any recursive transformers and panics when any
|
||||
// recursive transformers are detected. Note that the presence of a
|
||||
// recursive Transformer does not necessarily imply an infinite cycle.
|
||||
// As such, this check only activates after some minimal number of path steps.
|
||||
func (rc *recChecker) Check(p Path) {
|
||||
const minLen = 1 << 16
|
||||
if rc.next == 0 {
|
||||
rc.next = minLen
|
||||
}
|
||||
if len(p) < rc.next {
|
||||
return
|
||||
}
|
||||
rc.next <<= 1
|
||||
|
||||
// Check whether the same transformer has appeared at least twice.
|
||||
var ss []string
|
||||
m := map[Option]int{}
|
||||
for _, ps := range p {
|
||||
if t, ok := ps.(Transform); ok {
|
||||
t := t.Option()
|
||||
if m[t] == 1 { // Transformer was used exactly once before
|
||||
tf := t.(*transformer).fnc.Type()
|
||||
ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
|
||||
}
|
||||
m[t]++
|
||||
}
|
||||
}
|
||||
if len(ss) > 0 {
|
||||
const warning = "recursive set of Transformers detected"
|
||||
const help = "consider using cmpopts.AcyclicTransformer"
|
||||
set := strings.Join(ss, "\n\t")
|
||||
panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
|
||||
}
|
||||
}
|
||||
|
||||
// dynChecker tracks the state needed to periodically perform checks that
|
||||
// user provided functions are symmetric and deterministic.
|
||||
// The zero value is safe for immediate use.
|
||||
type dynChecker struct{ curr, next int }
|
||||
|
||||
// Next increments the state and reports whether a check should be performed.
|
||||
//
|
||||
// Checks occur every Nth function call, where N is a triangular number:
|
||||
//
|
||||
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
|
||||
//
|
||||
// See https://en.wikipedia.org/wiki/Triangular_number
|
||||
//
|
||||
// This sequence ensures that the cost of checks drops significantly as
|
||||
// the number of function calls grows larger.
|
||||
func (dc *dynChecker) Next() bool {
|
||||
ok := dc.curr == dc.next
|
||||
if ok {
|
||||
dc.curr = 0
|
||||
dc.next++
|
||||
}
|
||||
dc.curr++
|
||||
return ok
|
||||
}
|
||||
|
||||
// makeAddressable returns a value that is always addressable.
|
||||
// It returns the input verbatim if it is already addressable,
|
||||
// otherwise it creates a new value and returns an addressable copy.
|
||||
func makeAddressable(v reflect.Value) reflect.Value {
|
||||
if v.CanAddr() {
|
||||
return v
|
||||
}
|
||||
vc := reflect.New(v.Type()).Elem()
|
||||
vc.Set(v)
|
||||
return vc
|
||||
}
|
16
vendor/github.com/google/go-cmp/cmp/export_panic.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build purego
|
||||
// +build purego
|
||||
|
||||
package cmp
|
||||
|
||||
import "reflect"
|
||||
|
||||
const supportExporters = false
|
||||
|
||||
func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
|
||||
panic("no support for forcibly accessing unexported fields")
|
||||
}
|
36
vendor/github.com/google/go-cmp/cmp/export_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !purego
|
||||
// +build !purego
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const supportExporters = true
|
||||
|
||||
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
|
||||
// a struct such that the value has read-write permissions.
|
||||
//
|
||||
// The parent struct, v, must be addressable, while f must be a StructField
|
||||
// describing the field to retrieve. If addr is false,
|
||||
// then the returned value will be shallow copied to be non-addressable.
|
||||
func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
|
||||
ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
|
||||
if !addr {
|
||||
// A field is addressable if and only if the struct is addressable.
|
||||
// If the original parent value was not addressable, shallow copy the
|
||||
// value to make it non-addressable to avoid leaking an implementation
|
||||
// detail of how forcibly exporting a field works.
|
||||
if ve.Kind() == reflect.Interface && ve.IsNil() {
|
||||
return reflect.Zero(f.Type)
|
||||
}
|
||||
return reflect.ValueOf(ve.Interface()).Convert(f.Type)
|
||||
}
|
||||
return ve
|
||||
}
|
18
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !cmp_debug
|
||||
// +build !cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
var debug debugger
|
||||
|
||||
type debugger struct{}
|
||||
|
||||
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
|
||||
return f
|
||||
}
|
||||
func (debugger) Update() {}
|
||||
func (debugger) Finish() {}
|
123
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build cmp_debug
|
||||
// +build cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The algorithm can be seen running in real-time by enabling debugging:
|
||||
// go test -tags=cmp_debug -v
|
||||
//
|
||||
// Example output:
|
||||
// === RUN TestDifference/#34
|
||||
// ┌───────────────────────────────┐
|
||||
// │ \ · · · · · · · · · · · · · · │
|
||||
// │ · # · · · · · · · · · · · · · │
|
||||
// │ · \ · · · · · · · · · · · · · │
|
||||
// │ · · \ · · · · · · · · · · · · │
|
||||
// │ · · · X # · · · · · · · · · · │
|
||||
// │ · · · # \ · · · · · · · · · · │
|
||||
// │ · · · · · # # · · · · · · · · │
|
||||
// │ · · · · · # \ · · · · · · · · │
|
||||
// │ · · · · · · · \ · · · · · · · │
|
||||
// │ · · · · · · · · \ · · · · · · │
|
||||
// │ · · · · · · · · · \ · · · · · │
|
||||
// │ · · · · · · · · · · \ · · # · │
|
||||
// │ · · · · · · · · · · · \ # # · │
|
||||
// │ · · · · · · · · · · · # # # · │
|
||||
// │ · · · · · · · · · · # # # # · │
|
||||
// │ · · · · · · · · · # # # # # · │
|
||||
// │ · · · · · · · · · · · · · · \ │
|
||||
// └───────────────────────────────┘
|
||||
// [.Y..M.XY......YXYXY.|]
|
||||
//
|
||||
// The grid represents the edit-graph where the horizontal axis represents
|
||||
// list X and the vertical axis represents list Y. The start of the two lists
|
||||
// is the top-left, while the ends are the bottom-right. The '·' represents
|
||||
// an unexplored node in the graph. The '\' indicates that the two symbols
|
||||
// from list X and Y are equal. The 'X' indicates that two symbols are similar
|
||||
// (but not exactly equal) to each other. The '#' indicates that the two symbols
|
||||
// are different (and not similar). The algorithm traverses this graph trying to
|
||||
// make the paths starting in the top-left and the bottom-right connect.
|
||||
//
|
||||
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
|
||||
// the currently established path from the forward and reverse searches,
|
||||
// separated by a '|' character.
|
||||
|
||||
const (
|
||||
updateDelay = 100 * time.Millisecond
|
||||
finishDelay = 500 * time.Millisecond
|
||||
ansiTerminal = true // ANSI escape codes used to move terminal cursor
|
||||
)
|
||||
|
||||
var debug debugger
|
||||
|
||||
type debugger struct {
|
||||
sync.Mutex
|
||||
p1, p2 EditScript
|
||||
fwdPath, revPath *EditScript
|
||||
grid []byte
|
||||
lines int
|
||||
}
|
||||
|
||||
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
|
||||
dbg.Lock()
|
||||
dbg.fwdPath, dbg.revPath = p1, p2
|
||||
top := "┌─" + strings.Repeat("──", nx) + "┐\n"
|
||||
row := "│ " + strings.Repeat("· ", nx) + "│\n"
|
||||
btm := "└─" + strings.Repeat("──", nx) + "┘\n"
|
||||
dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
|
||||
dbg.lines = strings.Count(dbg.String(), "\n")
|
||||
fmt.Print(dbg)
|
||||
|
||||
// Wrap the EqualFunc so that we can intercept each result.
|
||||
return func(ix, iy int) (r Result) {
|
||||
cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
|
||||
for i := range cell {
|
||||
cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
|
||||
}
|
||||
switch r = f(ix, iy); {
|
||||
case r.Equal():
|
||||
cell[0] = '\\'
|
||||
case r.Similar():
|
||||
cell[0] = 'X'
|
||||
default:
|
||||
cell[0] = '#'
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (dbg *debugger) Update() {
|
||||
dbg.print(updateDelay)
|
||||
}
|
||||
|
||||
func (dbg *debugger) Finish() {
|
||||
dbg.print(finishDelay)
|
||||
dbg.Unlock()
|
||||
}
|
||||
|
||||
func (dbg *debugger) String() string {
|
||||
dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
|
||||
for i := len(*dbg.revPath) - 1; i >= 0; i-- {
|
||||
dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
|
||||
}
|
||||
return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
|
||||
}
|
||||
|
||||
func (dbg *debugger) print(d time.Duration) {
|
||||
if ansiTerminal {
|
||||
fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
|
||||
}
|
||||
fmt.Print(dbg)
|
||||
time.Sleep(d)
|
||||
}
|
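As an aside for readers of the visualization above: the empty edit-graph box is assembled with plain strings.Repeat calls, as in (*debugger).Begin above. A minimal standalone sketch, not part of the vendored file:

package main

import (
	"fmt"
	"strings"
)

// drawGrid mirrors the construction in (*debugger).Begin:
// a box of '·' cells, nx columns wide and ny rows tall.
func drawGrid(nx, ny int) string {
	top := "┌─" + strings.Repeat("──", nx) + "┐\n"
	row := "│ " + strings.Repeat("· ", nx) + "│\n"
	btm := "└─" + strings.Repeat("──", nx) + "┘\n"
	return top + strings.Repeat(row, ny) + btm
}

func main() {
	fmt.Print(drawGrid(7, 6)) // same shape as the 7-by-6 example in diff.go below
}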
402
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
Normal file
|
@@ -0,0 +1,402 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package diff implements an algorithm for producing edit-scripts.
|
||||
// The edit-script is a sequence of operations needed to transform one list
|
||||
// of symbols into another (or vice-versa). The edits allowed are insertions,
|
||||
// deletions, and modifications. The summation of all edits is called the
|
||||
// Levenshtein distance as this problem is well-known in computer science.
|
||||
//
|
||||
// This package prioritizes performance over accuracy. That is, the run time
|
||||
// is more important than obtaining a minimal Levenshtein distance.
|
||||
package diff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
// EditType represents a single operation within an edit-script.
|
||||
type EditType uint8
|
||||
|
||||
const (
|
||||
// Identity indicates that a symbol pair is identical in both list X and Y.
|
||||
Identity EditType = iota
|
||||
// UniqueX indicates that a symbol only exists in X and not Y.
|
||||
UniqueX
|
||||
// UniqueY indicates that a symbol only exists in Y and not X.
|
||||
UniqueY
|
||||
// Modified indicates that a symbol pair is a modification of each other.
|
||||
Modified
|
||||
)
|
||||
|
||||
// EditScript represents the series of differences between two lists.
|
||||
type EditScript []EditType
|
||||
|
||||
// String returns a human-readable string representing the edit-script where
|
||||
// Identity, UniqueX, UniqueY, and Modified are represented by the
|
||||
// '.', 'X', 'Y', and 'M' characters, respectively.
|
||||
func (es EditScript) String() string {
|
||||
b := make([]byte, len(es))
|
||||
for i, e := range es {
|
||||
switch e {
|
||||
case Identity:
|
||||
b[i] = '.'
|
||||
case UniqueX:
|
||||
b[i] = 'X'
|
||||
case UniqueY:
|
||||
b[i] = 'Y'
|
||||
case Modified:
|
||||
b[i] = 'M'
|
||||
default:
|
||||
panic("invalid edit-type")
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// stats returns a histogram of the number of each type of edit operation.
|
||||
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case Identity:
|
||||
s.NI++
|
||||
case UniqueX:
|
||||
s.NX++
|
||||
case UniqueY:
|
||||
s.NY++
|
||||
case Modified:
|
||||
s.NM++
|
||||
default:
|
||||
panic("invalid edit-type")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
|
||||
// lists X and Y are equal.
|
||||
func (es EditScript) Dist() int { return len(es) - es.stats().NI }
|
||||
|
||||
// LenX is the length of the X list.
|
||||
func (es EditScript) LenX() int { return len(es) - es.stats().NY }
|
||||
|
||||
// LenY is the length of the Y list.
|
||||
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
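To make the Dist/LenX/LenY relationships above concrete, here is a hypothetical test sketch; it is not part of the vendored file and assumes it sits alongside this internal package inside the go-cmp module:

package diff

import "testing"

// TestEditScriptInvariants is an illustrative sketch only: it spells out
// Dist, LenX, LenY, and String for a small hand-written edit-script.
func TestEditScriptInvariants(t *testing.T) {
	es := EditScript{Identity, UniqueX, UniqueY, Modified}
	// stats: NI=1, NX=1, NY=1, NM=1
	if got, want := es.Dist(), 3; got != want { // len(es) - NI = 4 - 1
		t.Errorf("Dist() = %d, want %d", got, want)
	}
	if got, want := es.LenX(), 3; got != want { // len(es) - NY = 4 - 1
		t.Errorf("LenX() = %d, want %d", got, want)
	}
	if got, want := es.LenY(), 3; got != want { // len(es) - NX = 4 - 1
		t.Errorf("LenY() = %d, want %d", got, want)
	}
	if got, want := es.String(), ".XYM"; got != want {
		t.Errorf("String() = %q, want %q", got, want)
	}
}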
|
||||
|
||||
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
|
||||
// When called by Difference, the index is guaranteed to be within nx and ny.
|
||||
type EqualFunc func(ix int, iy int) Result
|
||||
|
||||
// Result is the result of comparison.
|
||||
// NumSame is the number of sub-elements that are equal.
|
||||
// NumDiff is the number of sub-elements that are not equal.
|
||||
type Result struct{ NumSame, NumDiff int }
|
||||
|
||||
// BoolResult returns a Result that is either Equal or not Equal.
|
||||
func BoolResult(b bool) Result {
|
||||
if b {
|
||||
return Result{NumSame: 1} // Equal, Similar
|
||||
} else {
|
||||
return Result{NumDiff: 2} // Not Equal, not Similar
|
||||
}
|
||||
}
|
||||
|
||||
// Equal indicates whether the symbols are equal. Two symbols are equal
|
||||
// if and only if NumDiff == 0. If Equal, then they are also Similar.
|
||||
func (r Result) Equal() bool { return r.NumDiff == 0 }
|
||||
|
||||
// Similar indicates whether two symbols are similar and may be represented
|
||||
// by using the Modified type. As a special case, we consider binary comparisons
|
||||
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
|
||||
//
|
||||
// The exact ratio of NumSame to NumDiff to determine similarity may change.
|
||||
func (r Result) Similar() bool {
|
||||
// Use NumSame+1 to offset NumSame so that binary comparisons are similar.
|
||||
return r.NumSame+1 >= r.NumDiff
|
||||
}
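The Similar threshold is easiest to see with numbers. The following standalone sketch copies the two predicates above purely for illustration:

package main

import "fmt"

// result copies the Result semantics defined above, for illustration only.
type result struct{ NumSame, NumDiff int }

func (r result) Equal() bool   { return r.NumDiff == 0 }
func (r result) Similar() bool { return r.NumSame+1 >= r.NumDiff }

func main() {
	fmt.Println(result{NumSame: 1, NumDiff: 0}.Similar()) // true: BoolResult(true)
	fmt.Println(result{NumSame: 0, NumDiff: 1}.Similar()) // true: the binary-comparison special case
	fmt.Println(result{NumSame: 0, NumDiff: 2}.Similar()) // false: BoolResult(false)
	fmt.Println(result{NumSame: 2, NumDiff: 4}.Similar()) // false: 2+1 < 4
}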
|
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
// Difference reports whether two lists of lengths nx and ny are equal
|
||||
// given the definition of equality provided as f.
|
||||
//
|
||||
// This function returns an edit-script, which is a sequence of operations
|
||||
// needed to convert one list into the other. The following invariants for
|
||||
// the edit-script are maintained:
|
||||
// - eq == (es.Dist()==0)
|
||||
// - nx == es.LenX()
|
||||
// - ny == es.LenY()
|
||||
//
|
||||
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
|
||||
// produces an edit-script with a minimal Levenshtein distance). This algorithm
|
||||
// favors performance over optimality. The exact output is not guaranteed to
|
||||
// be stable and may change over time.
|
||||
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
// This algorithm is based on traversing what is known as an "edit-graph".
|
||||
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
|
||||
// by Eugene W. Myers. Since D can be as large as N itself, this is
|
||||
// effectively O(N^2). Unlike the algorithm from that paper, we are not
|
||||
// interested in the optimal path, but at least some "decent" path.
|
||||
//
|
||||
// For example, let X and Y be lists of symbols:
|
||||
// X = [A B C A B B A]
|
||||
// Y = [C B A B A C]
|
||||
//
|
||||
// The edit-graph can be drawn as the following:
|
||||
// A B C A B B A
|
||||
// ┌─────────────┐
|
||||
// C │_|_|\|_|_|_|_│ 0
|
||||
// B │_|\|_|_|\|\|_│ 1
|
||||
// A │\|_|_|\|_|_|\│ 2
|
||||
// B │_|\|_|_|\|\|_│ 3
|
||||
// A │\|_|_|\|_|_|\│ 4
|
||||
// C │ | |\| | | | │ 5
|
||||
// └─────────────┘ 6
|
||||
// 0 1 2 3 4 5 6 7
|
||||
//
|
||||
// List X is written along the horizontal axis, while list Y is written
|
||||
// along the vertical axis. At any point on this grid, if the symbol in
|
||||
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
|
||||
// The goal of any minimal edit-script algorithm is to find a path from the
|
||||
// top-left corner to the bottom-right corner, while traveling through the
|
||||
// fewest horizontal or vertical edges.
|
||||
// A horizontal edge is equivalent to inserting a symbol from list X.
|
||||
// A vertical edge is equivalent to inserting a symbol from list Y.
|
||||
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
||||
|
||||
// Invariants:
|
||||
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
||||
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
||||
//
|
||||
// In general:
|
||||
// - fwdFrontier.X < revFrontier.X
|
||||
// - fwdFrontier.Y < revFrontier.Y
|
||||
//
|
||||
// Unless, it is time for the algorithm to terminate.
|
||||
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
|
||||
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
|
||||
fwdFrontier := fwdPath.point // Forward search frontier
|
||||
revFrontier := revPath.point // Reverse search frontier
|
||||
|
||||
// Search budget bounds the cost of searching for better paths.
|
||||
// The longest sequence of non-matching symbols that can be tolerated is
|
||||
// approximately the square-root of the search budget.
|
||||
searchBudget := 4 * (nx + ny) // O(n)
|
||||
|
||||
// Running the tests with the "cmp_debug" build tag prints a visualization
|
||||
// of the algorithm running in real-time. This is educational for
|
||||
// understanding how the algorithm works. See debug_enable.go.
|
||||
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
|
||||
|
||||
// The algorithm below is a greedy, meet-in-the-middle algorithm for
|
||||
// computing sub-optimal edit-scripts between two lists.
|
||||
//
|
||||
// The algorithm is approximately as follows:
|
||||
// - Searching for differences switches back-and-forth between
|
||||
// a search that starts at the beginning (the top-left corner), and
|
||||
// a search that starts at the end (the bottom-right corner).
|
||||
//   The goal of the search is to connect with the search
|
||||
// from the opposite corner.
|
||||
// - As we search, we build a path in a greedy manner,
|
||||
// where the first match seen is added to the path (this is sub-optimal,
|
||||
// but provides a decent result in practice). When matches are found,
|
||||
// we try the next pair of symbols in the lists and follow all matches
|
||||
// as far as possible.
|
||||
// - When searching for matches, we search along a diagonal going through
|
||||
// through the "frontier" point. If no matches are found,
|
||||
// we advance the frontier towards the opposite corner.
|
||||
// - This algorithm terminates when either the X coordinates or the
|
||||
// Y coordinates of the forward and reverse frontier points ever intersect.
|
||||
|
||||
// This algorithm is correct even if searching only in the forward direction
|
||||
// or in the reverse direction. We do both because it is commonly observed
|
||||
// that two lists differ because elements were added to the front
|
||||
// or end of the other list.
|
||||
//
|
||||
// Non-deterministically start with either the forward or reverse direction
|
||||
// to introduce some deliberate instability so that we have the flexibility
|
||||
// to change this algorithm in the future.
|
||||
if flags.Deterministic || randBool {
|
||||
goto forwardSearch
|
||||
} else {
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
forwardSearch:
|
||||
{
|
||||
// Forward search from the beginning.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
|
||||
switch {
|
||||
case p.X >= revPath.X || p.Y < fwdPath.Y:
|
||||
stop1 = true // Hit top-right corner
|
||||
case p.Y >= revPath.Y || p.X < fwdPath.X:
|
||||
stop2 = true // Hit bottom-left corner
|
||||
case f(p.X, p.Y).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
fwdPath.connect(p, f)
|
||||
fwdPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(fwdPath.X, fwdPath.Y).Equal() {
|
||||
break
|
||||
}
|
||||
fwdPath.append(Identity)
|
||||
}
|
||||
fwdFrontier = fwdPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards reverse point.
|
||||
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
|
||||
fwdFrontier.X++
|
||||
} else {
|
||||
fwdFrontier.Y++
|
||||
}
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
reverseSearch:
|
||||
{
|
||||
// Reverse search from the end.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{revFrontier.X - z, revFrontier.Y + z}
|
||||
switch {
|
||||
case fwdPath.X >= p.X || revPath.Y < p.Y:
|
||||
stop1 = true // Hit bottom-left corner
|
||||
case fwdPath.Y >= p.Y || revPath.X < p.X:
|
||||
stop2 = true // Hit top-right corner
|
||||
case f(p.X-1, p.Y-1).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
revPath.connect(p, f)
|
||||
revPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(revPath.X-1, revPath.Y-1).Equal() {
|
||||
break
|
||||
}
|
||||
revPath.append(Identity)
|
||||
}
|
||||
revFrontier = revPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards forward point.
|
||||
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
|
||||
revFrontier.X--
|
||||
} else {
|
||||
revFrontier.Y--
|
||||
}
|
||||
goto forwardSearch
|
||||
}
|
||||
|
||||
finishSearch:
|
||||
// Join the forward and reverse paths and then append the reverse path.
|
||||
fwdPath.connect(revPath.point, f)
|
||||
for i := len(revPath.es) - 1; i >= 0; i-- {
|
||||
t := revPath.es[i]
|
||||
revPath.es = revPath.es[:i]
|
||||
fwdPath.append(t)
|
||||
}
|
||||
debug.Finish()
|
||||
return fwdPath.es
|
||||
}
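As a usage sketch for Difference (hypothetical test code, assuming it lives next to this internal package inside the go-cmp module; the exact script produced is deliberately not guaranteed to be minimal or stable):

package diff

import "testing"

// TestDifferenceSketch is illustrative only; it checks the documented
// invariants rather than a particular edit-script.
func TestDifferenceSketch(t *testing.T) {
	x := []string{"a", "b", "c", "d"}
	y := []string{"a", "x", "c", "d", "e"}
	es := Difference(len(x), len(y), func(ix, iy int) Result {
		return BoolResult(x[ix] == y[iy])
	})
	if es.LenX() != len(x) || es.LenY() != len(y) {
		t.Fatalf("LenX/LenY = %d/%d, want %d/%d", es.LenX(), es.LenY(), len(x), len(y))
	}
	if es.Dist() == 0 {
		t.Fatalf("Dist() = 0, want non-zero for unequal lists")
	}
	t.Log(es) // one possible (not guaranteed) script: ".XY..Y"
}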
|
||||
|
||||
type path struct {
|
||||
dir int // +1 if forward, -1 if reverse
|
||||
point // Leading point of the EditScript path
|
||||
es EditScript
|
||||
}
|
||||
|
||||
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
|
||||
// to the edit-script to connect p.point to dst.
|
||||
func (p *path) connect(dst point, f EqualFunc) {
|
||||
if p.dir > 0 {
|
||||
// Connect in forward direction.
|
||||
for dst.X > p.X && dst.Y > p.Y {
|
||||
switch r := f(p.X, p.Y); {
|
||||
case r.Equal():
|
||||
p.append(Identity)
|
||||
case r.Similar():
|
||||
p.append(Modified)
|
||||
case dst.X-p.X >= dst.Y-p.Y:
|
||||
p.append(UniqueX)
|
||||
default:
|
||||
p.append(UniqueY)
|
||||
}
|
||||
}
|
||||
for dst.X > p.X {
|
||||
p.append(UniqueX)
|
||||
}
|
||||
for dst.Y > p.Y {
|
||||
p.append(UniqueY)
|
||||
}
|
||||
} else {
|
||||
// Connect in reverse direction.
|
||||
for p.X > dst.X && p.Y > dst.Y {
|
||||
switch r := f(p.X-1, p.Y-1); {
|
||||
case r.Equal():
|
||||
p.append(Identity)
|
||||
case r.Similar():
|
||||
p.append(Modified)
|
||||
case p.Y-dst.Y >= p.X-dst.X:
|
||||
p.append(UniqueY)
|
||||
default:
|
||||
p.append(UniqueX)
|
||||
}
|
||||
}
|
||||
for p.X > dst.X {
|
||||
p.append(UniqueX)
|
||||
}
|
||||
for p.Y > dst.Y {
|
||||
p.append(UniqueY)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *path) append(t EditType) {
|
||||
p.es = append(p.es, t)
|
||||
switch t {
|
||||
case Identity, Modified:
|
||||
p.add(p.dir, p.dir)
|
||||
case UniqueX:
|
||||
p.add(p.dir, 0)
|
||||
case UniqueY:
|
||||
p.add(0, p.dir)
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
|
||||
type point struct{ X, Y int }
|
||||
|
||||
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
|
||||
|
||||
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
|
||||
//
|
||||
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
|
||||
func zigzag(x int) int {
|
||||
if x&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x >> 1
|
||||
}
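A tiny standalone sketch (the helper is copied from above) shows the zig-zag sequence directly:

package main

import "fmt"

// zigzag is copied from the function above for illustration.
func zigzag(x int) int {
	if x&1 != 0 {
		x = ^x
	}
	return x >> 1
}

func main() {
	vals := make([]int, 6)
	for i := range vals {
		vals[i] = zigzag(i)
	}
	fmt.Println(vals) // [0 -1 1 -2 2 -3]
}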
|
9
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
generated
vendored
Normal file
|
@@ -0,0 +1,9 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flags
|
||||
|
||||
// Deterministic controls whether the output of Diff should be deterministic.
|
||||
// This is only used for testing.
|
||||
var Deterministic bool
|
99
vendor/github.com/google/go-cmp/cmp/internal/function/func.go
generated
vendored
Normal file
|
@@ -0,0 +1,99 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package function provides functionality for identifying function types.
|
||||
package function
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type funcType int
|
||||
|
||||
const (
|
||||
_ funcType = iota
|
||||
|
||||
tbFunc // func(T) bool
|
||||
ttbFunc // func(T, T) bool
|
||||
trbFunc // func(T, R) bool
|
||||
tibFunc // func(T, I) bool
|
||||
trFunc // func(T) R
|
||||
|
||||
Equal = ttbFunc // func(T, T) bool
|
||||
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
|
||||
Transformer = trFunc // func(T) R
|
||||
ValueFilter = ttbFunc // func(T, T) bool
|
||||
Less = ttbFunc // func(T, T) bool
|
||||
ValuePredicate = tbFunc // func(T) bool
|
||||
KeyValuePredicate = trbFunc // func(T, R) bool
|
||||
)
|
||||
|
||||
var boolType = reflect.TypeOf(true)
|
||||
|
||||
// IsType reports whether the reflect.Type is of the specified function type.
|
||||
func IsType(t reflect.Type, ft funcType) bool {
|
||||
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
|
||||
return false
|
||||
}
|
||||
ni, no := t.NumIn(), t.NumOut()
|
||||
switch ft {
|
||||
case tbFunc: // func(T) bool
|
||||
if ni == 1 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case ttbFunc: // func(T, T) bool
|
||||
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trbFunc: // func(T, R) bool
|
||||
if ni == 2 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case tibFunc: // func(T, I) bool
|
||||
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trFunc: // func(T) R
|
||||
if ni == 1 && no == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
|
||||
|
||||
// NameOf returns the name of the function value.
|
||||
func NameOf(v reflect.Value) string {
|
||||
fnc := runtime.FuncForPC(v.Pointer())
|
||||
if fnc == nil {
|
||||
return "<unknown>"
|
||||
}
|
||||
fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
|
||||
|
||||
// Method closures have a "-fm" suffix.
|
||||
fullName = strings.TrimSuffix(fullName, "-fm")
|
||||
|
||||
var name string
|
||||
for len(fullName) > 0 {
|
||||
inParen := strings.HasSuffix(fullName, ")")
|
||||
fullName = strings.TrimSuffix(fullName, ")")
|
||||
|
||||
s := lastIdentRx.FindString(fullName)
|
||||
if s == "" {
|
||||
break
|
||||
}
|
||||
name = s + "." + name
|
||||
fullName = strings.TrimSuffix(fullName, s)
|
||||
|
||||
if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
|
||||
fullName = fullName[:i]
|
||||
}
|
||||
fullName = strings.TrimSuffix(fullName, ".")
|
||||
}
|
||||
return strings.TrimSuffix(name, ".")
|
||||
}
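NameOf parses the raw strings produced by runtime.FuncForPC. A standalone sketch of those raw names (the exact output depends on package path and Go version, so the comments show examples only):

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

func exported() {}

// rawName returns the unparsed symbol name that NameOf starts from.
func rawName(v reflect.Value) string {
	return runtime.FuncForPC(v.Pointer()).Name()
}

func main() {
	fmt.Println(rawName(reflect.ValueOf(exported)))        // e.g., "main.exported"
	fmt.Println(rawName(reflect.ValueOf(strings.ToUpper))) // e.g., "strings.ToUpper"

	var sb strings.Builder
	bound := sb.WriteString // method value; its raw name carries the "-fm" suffix
	fmt.Println(rawName(reflect.ValueOf(bound))) // e.g., "strings.(*Builder).WriteString-fm"
}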
|
164
vendor/github.com/google/go-cmp/cmp/internal/value/name.go
generated
vendored
Normal file
|
@@ -0,0 +1,164 @@
|
|||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
|
||||
// TypeString is nearly identical to reflect.Type.String,
|
||||
// but has an additional option to specify that full type names be used.
|
||||
func TypeString(t reflect.Type, qualified bool) string {
|
||||
return string(appendTypeName(nil, t, qualified, false))
|
||||
}
|
||||
|
||||
func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
|
||||
// BUG: Go reflection provides no way to disambiguate two named types
|
||||
// of the same name and within the same package,
|
||||
// but declared within the namespace of different functions.
|
||||
|
||||
// Use the "any" alias instead of "interface{}" for better readability.
|
||||
if t == anyType {
|
||||
return append(b, "any"...)
|
||||
}
|
||||
|
||||
// Named type.
|
||||
if t.Name() != "" {
|
||||
if qualified && t.PkgPath() != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, t.PkgPath()...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
b = append(b, t.Name()...)
|
||||
} else {
|
||||
b = append(b, t.String()...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Unnamed type.
|
||||
switch k := t.Kind(); k {
|
||||
case reflect.Bool, reflect.String, reflect.UnsafePointer,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
b = append(b, k.String()...)
|
||||
case reflect.Chan:
|
||||
if t.ChanDir() == reflect.RecvDir {
|
||||
b = append(b, "<-"...)
|
||||
}
|
||||
b = append(b, "chan"...)
|
||||
if t.ChanDir() == reflect.SendDir {
|
||||
b = append(b, "<-"...)
|
||||
}
|
||||
b = append(b, ' ')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Func:
|
||||
if !elideFunc {
|
||||
b = append(b, "func"...)
|
||||
}
|
||||
b = append(b, '(')
|
||||
for i := 0; i < t.NumIn(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
if i == t.NumIn()-1 && t.IsVariadic() {
|
||||
b = append(b, "..."...)
|
||||
b = appendTypeName(b, t.In(i).Elem(), qualified, false)
|
||||
} else {
|
||||
b = appendTypeName(b, t.In(i), qualified, false)
|
||||
}
|
||||
}
|
||||
b = append(b, ')')
|
||||
switch t.NumOut() {
|
||||
case 0:
|
||||
// Do nothing
|
||||
case 1:
|
||||
b = append(b, ' ')
|
||||
b = appendTypeName(b, t.Out(0), qualified, false)
|
||||
default:
|
||||
b = append(b, " ("...)
|
||||
for i := 0; i < t.NumOut(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
b = appendTypeName(b, t.Out(i), qualified, false)
|
||||
}
|
||||
b = append(b, ')')
|
||||
}
|
||||
case reflect.Struct:
|
||||
b = append(b, "struct{ "...)
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, "; "...)
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if !sf.Anonymous {
|
||||
if qualified && sf.PkgPath != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, sf.PkgPath...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
}
|
||||
b = append(b, sf.Name...)
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = appendTypeName(b, sf.Type, qualified, false)
|
||||
if sf.Tag != "" {
|
||||
b = append(b, ' ')
|
||||
b = strconv.AppendQuote(b, string(sf.Tag))
|
||||
}
|
||||
}
|
||||
if b[len(b)-1] == ' ' {
|
||||
b = b[:len(b)-1]
|
||||
} else {
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = append(b, '}')
|
||||
case reflect.Slice, reflect.Array:
|
||||
b = append(b, '[')
|
||||
if k == reflect.Array {
|
||||
b = strconv.AppendUint(b, uint64(t.Len()), 10)
|
||||
}
|
||||
b = append(b, ']')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Map:
|
||||
b = append(b, "map["...)
|
||||
b = appendTypeName(b, t.Key(), qualified, false)
|
||||
b = append(b, ']')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Ptr:
|
||||
b = append(b, '*')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Interface:
|
||||
b = append(b, "interface{ "...)
|
||||
for i := 0; i < t.NumMethod(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, "; "...)
|
||||
}
|
||||
m := t.Method(i)
|
||||
if qualified && m.PkgPath != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, m.PkgPath...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
}
|
||||
b = append(b, m.Name...)
|
||||
b = appendTypeName(b, m.Type, qualified, true)
|
||||
}
|
||||
if b[len(b)-1] == ' ' {
|
||||
b = b[:len(b)-1]
|
||||
} else {
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = append(b, '}')
|
||||
default:
|
||||
panic("invalid kind: " + k.String())
|
||||
}
|
||||
return b
|
||||
}
|
34
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
generated
vendored
Normal file
|
@@ -0,0 +1,34 @@
|
|||
// Copyright 2018, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build purego
|
||||
// +build purego
|
||||
|
||||
package value
|
||||
|
||||
import "reflect"
|
||||
|
||||
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
|
||||
type Pointer struct {
|
||||
p uintptr
|
||||
t reflect.Type
|
||||
}
|
||||
|
||||
// PointerOf returns a Pointer from v, which must be a
|
||||
// reflect.Ptr, reflect.Slice, or reflect.Map.
|
||||
func PointerOf(v reflect.Value) Pointer {
|
||||
// NOTE: Storing a pointer as an uintptr is technically incorrect as it
|
||||
// assumes that the GC implementation does not use a moving collector.
|
||||
return Pointer{v.Pointer(), v.Type()}
|
||||
}
|
||||
|
||||
// IsNil reports whether the pointer is nil.
|
||||
func (p Pointer) IsNil() bool {
|
||||
return p.p == 0
|
||||
}
|
||||
|
||||
// Uintptr returns the pointer as a uintptr.
|
||||
func (p Pointer) Uintptr() uintptr {
|
||||
return p.p
|
||||
}
|
37
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
generated
vendored
Normal file
|
@@ -0,0 +1,37 @@
|
|||
// Copyright 2018, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !purego
|
||||
// +build !purego
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
|
||||
type Pointer struct {
|
||||
p unsafe.Pointer
|
||||
t reflect.Type
|
||||
}
|
||||
|
||||
// PointerOf returns a Pointer from v, which must be a
|
||||
// reflect.Ptr, reflect.Slice, or reflect.Map.
|
||||
func PointerOf(v reflect.Value) Pointer {
|
||||
// The proper representation of a pointer is unsafe.Pointer,
|
||||
// which is necessary if the GC ever uses a moving collector.
|
||||
return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
|
||||
}
|
||||
|
||||
// IsNil reports whether the pointer is nil.
|
||||
func (p Pointer) IsNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
// Uintptr returns the pointer as a uintptr.
|
||||
func (p Pointer) Uintptr() uintptr {
|
||||
return uintptr(p.p)
|
||||
}
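The Pointer type above packages reference identity so it can serve as a map key (cmp uses this for cycle detection). A standalone sketch of the same idea using only the public reflect API; the identity type here is illustrative, not the vendored one, and like the purego variant it stores a uintptr and assumes a non-moving collector:

package main

import (
	"fmt"
	"reflect"
)

// identity pairs the referent address with its type, mirroring the
// (pointer, type) key that the vendored Pointer type represents.
type identity struct {
	ptr uintptr
	typ reflect.Type
}

func idOf(v interface{}) identity {
	rv := reflect.ValueOf(v)
	return identity{rv.Pointer(), rv.Type()}
}

func main() {
	s := []int{1, 2, 3}
	a, b := s[:2], s[:3] // two slices sharing one backing array
	seen := map[identity]bool{idOf(a): true}
	fmt.Println(seen[idOf(b)])       // true: same backing array and same type
	fmt.Println(idOf(a) == idOf(&s)) // false: different types (and addresses)
}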
|
106
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
generated
vendored
Normal file
|
@@ -0,0 +1,106 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
|
||||
// The type of each value must be comparable.
|
||||
func SortKeys(vs []reflect.Value) []reflect.Value {
|
||||
if len(vs) == 0 {
|
||||
return vs
|
||||
}
|
||||
|
||||
// Sort the map keys.
|
||||
sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
|
||||
|
||||
// Deduplicate keys (fails for NaNs).
|
||||
vs2 := vs[:1]
|
||||
for _, v := range vs[1:] {
|
||||
if isLess(vs2[len(vs2)-1], v) {
|
||||
vs2 = append(vs2, v)
|
||||
}
|
||||
}
|
||||
return vs2
|
||||
}
|
||||
|
||||
// isLess is a generic function for sorting arbitrary map keys.
|
||||
// The inputs must be of the same type and must be comparable.
|
||||
func isLess(x, y reflect.Value) bool {
|
||||
switch x.Type().Kind() {
|
||||
case reflect.Bool:
|
||||
return !x.Bool() && y.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return x.Int() < y.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return x.Uint() < y.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
// NOTE: This does not sort -0 as less than +0
|
||||
// since Go maps treat -0 and +0 as equal keys.
|
||||
fx, fy := x.Float(), y.Float()
|
||||
return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
cx, cy := x.Complex(), y.Complex()
|
||||
rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
|
||||
if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
|
||||
return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
|
||||
}
|
||||
return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
|
||||
case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
|
||||
return x.Pointer() < y.Pointer()
|
||||
case reflect.String:
|
||||
return x.String() < y.String()
|
||||
case reflect.Array:
|
||||
for i := 0; i < x.Len(); i++ {
|
||||
if isLess(x.Index(i), y.Index(i)) {
|
||||
return true
|
||||
}
|
||||
if isLess(y.Index(i), x.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
case reflect.Struct:
|
||||
for i := 0; i < x.NumField(); i++ {
|
||||
if isLess(x.Field(i), y.Field(i)) {
|
||||
return true
|
||||
}
|
||||
if isLess(y.Field(i), x.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
case reflect.Interface:
|
||||
vx, vy := x.Elem(), y.Elem()
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
return !vx.IsValid() && vy.IsValid()
|
||||
}
|
||||
tx, ty := vx.Type(), vy.Type()
|
||||
if tx == ty {
|
||||
return isLess(x.Elem(), y.Elem())
|
||||
}
|
||||
if tx.Kind() != ty.Kind() {
|
||||
return vx.Kind() < vy.Kind()
|
||||
}
|
||||
if tx.String() != ty.String() {
|
||||
return tx.String() < ty.String()
|
||||
}
|
||||
if tx.PkgPath() != ty.PkgPath() {
|
||||
return tx.PkgPath() < ty.PkgPath()
|
||||
}
|
||||
// This can happen in rare situations, so we fallback to just comparing
|
||||
// the unique pointer for a reflect.Type. This guarantees deterministic
|
||||
// ordering within a program, but it is obviously not stable.
|
||||
return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
|
||||
default:
|
||||
// Must be Func, Map, or Slice; which are not comparable.
|
||||
panic(fmt.Sprintf("%T is not comparable", x.Type()))
|
||||
}
|
||||
}
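SortKeys is what gives cmp a deterministic order over map entries. A standalone sketch of the effect for a plain int-keyed map, using only the public reflect and sort APIs (the vendored isLess generalizes this to any comparable key kind):

package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	m := map[int]string{3: "c", 1: "a", 2: "b"}

	// reflect.Value.MapKeys returns keys in an unspecified order,
	// so sort them before iterating, just as SortKeys does for cmp.
	keys := reflect.ValueOf(m).MapKeys()
	sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })

	for _, k := range keys {
		fmt.Println(k.Int(), m[int(k.Int())]) // 1 a, then 2 b, then 3 c
	}
}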
|
554
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
Normal file
|
@@ -0,0 +1,554 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
)
|
||||
|
||||
// Option configures for specific behavior of Equal and Diff. In particular,
|
||||
// the fundamental Option functions (Ignore, Transformer, and Comparer),
|
||||
// configure how equality is determined.
|
||||
//
|
||||
// The fundamental options may be composed with filters (FilterPath and
|
||||
// FilterValues) to control the scope over which they are applied.
|
||||
//
|
||||
// The cmp/cmpopts package provides helper functions for creating options that
|
||||
// may be used with Equal and Diff.
|
||||
type Option interface {
|
||||
// filter applies all filters and returns the option that remains.
|
||||
// Each option may only read s.curPath and call s.callTTBFunc.
|
||||
//
|
||||
// An Options is returned only if multiple comparers or transformers
|
||||
// can apply simultaneously and will only contain values of those types
|
||||
// or sub-Options containing values of those types.
|
||||
filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
|
||||
}
|
||||
|
||||
// applicableOption represents the following types:
|
||||
//
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Grouping: Options
|
||||
type applicableOption interface {
|
||||
Option
|
||||
|
||||
// apply executes the option, which may mutate s or panic.
|
||||
apply(s *state, vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
// coreOption represents the following types:
|
||||
//
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Filters: *pathFilter | *valuesFilter
|
||||
type coreOption interface {
|
||||
Option
|
||||
isCore()
|
||||
}
|
||||
|
||||
type core struct{}
|
||||
|
||||
func (core) isCore() {}
|
||||
|
||||
// Options is a list of Option values that also satisfies the Option interface.
|
||||
// Helper comparison packages may return an Options value when packing multiple
|
||||
// Option values into a single Option. When this package processes an Options,
|
||||
// it will be implicitly expanded into a flat list.
|
||||
//
|
||||
// Applying a filter on an Options is equivalent to applying that same filter
|
||||
// on all individual options held within.
|
||||
type Options []Option
|
||||
|
||||
func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
|
||||
for _, opt := range opts {
|
||||
switch opt := opt.filter(s, t, vx, vy); opt.(type) {
|
||||
case ignore:
|
||||
return ignore{} // Only ignore can short-circuit evaluation
|
||||
case validator:
|
||||
out = validator{} // Takes precedence over comparer or transformer
|
||||
case *comparer, *transformer, Options:
|
||||
switch out.(type) {
|
||||
case nil:
|
||||
out = opt
|
||||
case validator:
|
||||
// Keep validator
|
||||
case *comparer, *transformer, Options:
|
||||
out = Options{out, opt} // Conflicting comparers or transformers
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (opts Options) apply(s *state, _, _ reflect.Value) {
|
||||
const warning = "ambiguous set of applicable options"
|
||||
const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
|
||||
var ss []string
|
||||
for _, opt := range flattenOptions(nil, opts) {
|
||||
ss = append(ss, fmt.Sprint(opt))
|
||||
}
|
||||
set := strings.Join(ss, "\n\t")
|
||||
panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
|
||||
}
|
||||
|
||||
func (opts Options) String() string {
|
||||
var ss []string
|
||||
for _, opt := range opts {
|
||||
ss = append(ss, fmt.Sprint(opt))
|
||||
}
|
||||
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
|
||||
}
|
||||
|
||||
// FilterPath returns a new Option where opt is only evaluated if filter f
|
||||
// returns true for the current Path in the value tree.
|
||||
//
|
||||
// This filter is called even if a slice element or map entry is missing and
|
||||
// provides an opportunity to ignore such cases. The filter function must be
|
||||
// symmetric such that the filter result is identical regardless of whether the
|
||||
// missing value is from x or y.
|
||||
//
|
||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
||||
// a previously filtered Option.
|
||||
func FilterPath(f func(Path) bool, opt Option) Option {
|
||||
if f == nil {
|
||||
panic("invalid path filter function")
|
||||
}
|
||||
if opt := normalizeOption(opt); opt != nil {
|
||||
return &pathFilter{fnc: f, opt: opt}
|
||||
}
|
||||
return nil
|
||||
}
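A typical use of FilterPath through the public API is scoping an Ignore to a single field by name. A hedged sketch; the User type and its LastSeen field are made up for illustration:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type User struct {
	Name     string
	LastSeen int64 // hypothetical field we want to exclude from comparison
}

func main() {
	ignoreLastSeen := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".LastSeen"
	}, cmp.Ignore())

	x := User{Name: "gopher", LastSeen: 1}
	y := User{Name: "gopher", LastSeen: 2}
	fmt.Println(cmp.Equal(x, y, ignoreLastSeen)) // true: only LastSeen differs
}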
|
||||
|
||||
type pathFilter struct {
|
||||
core
|
||||
fnc func(Path) bool
|
||||
opt Option
|
||||
}
|
||||
|
||||
func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if f.fnc(s.curPath) {
|
||||
return f.opt.filter(s, t, vx, vy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f pathFilter) String() string {
|
||||
return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
|
||||
}
|
||||
|
||||
// FilterValues returns a new Option where opt is only evaluated if filter f,
|
||||
// which is a function of the form "func(T, T) bool", returns true for the
|
||||
// current pair of values being compared. If either value is invalid or
|
||||
// the type of the values is not assignable to T, then this filter implicitly
|
||||
// returns false.
|
||||
//
|
||||
// The filter function must be
|
||||
// symmetric (i.e., agnostic to the order of the inputs) and
|
||||
// deterministic (i.e., produces the same result when given the same inputs).
|
||||
// If T is an interface, it is possible that f is called with two values with
|
||||
// different concrete types that both implement T.
|
||||
//
|
||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
||||
// a previously filtered Option.
|
||||
func FilterValues(f interface{}, opt Option) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid values filter function: %T", f))
|
||||
}
|
||||
if opt := normalizeOption(opt); opt != nil {
|
||||
vf := &valuesFilter{fnc: v, opt: opt}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
vf.typ = ti
|
||||
}
|
||||
return vf
|
||||
}
|
||||
return nil
|
||||
}
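FilterValues is commonly paired with a Comparer to apply approximate equality to selected value pairs only. A hedged sketch; the tolerance is arbitrary:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Apply the approximate Comparer only when both values are non-NaN floats.
	approx := cmp.FilterValues(func(x, y float64) bool {
		return !math.IsNaN(x) && !math.IsNaN(y)
	}, cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-6
	}))

	fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx))            // true: within tolerance
	fmt.Println(cmp.Equal(math.NaN(), math.NaN(), approx))  // false: filter declines, NaN != NaN
}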
|
||||
|
||||
type valuesFilter struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
opt Option
|
||||
}
|
||||
|
||||
func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
|
||||
return nil
|
||||
}
|
||||
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
|
||||
return f.opt.filter(s, t, vx, vy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f valuesFilter) String() string {
|
||||
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
|
||||
}
|
||||
|
||||
// Ignore is an Option that causes all comparisons to be ignored.
|
||||
// This value is intended to be combined with FilterPath or FilterValues.
|
||||
// It is an error to pass an unfiltered Ignore option to Equal.
|
||||
func Ignore() Option { return ignore{} }
|
||||
|
||||
type ignore struct{ core }
|
||||
|
||||
func (ignore) isFiltered() bool { return false }
|
||||
func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
|
||||
func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
|
||||
func (ignore) String() string { return "Ignore()" }
|
||||
|
||||
// validator is a sentinel Option type to indicate that some options could not
|
||||
// be evaluated due to unexported fields, missing slice elements, or
|
||||
// missing map entries. Both values are validator only for unexported fields.
|
||||
type validator struct{ core }
|
||||
|
||||
func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
return validator{}
|
||||
}
|
||||
if !vx.CanInterface() || !vy.CanInterface() {
|
||||
return validator{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (validator) apply(s *state, vx, vy reflect.Value) {
|
||||
// Implies missing slice element or map entry.
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
s.report(vx.IsValid() == vy.IsValid(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Unable to Interface implies unexported field without visibility access.
|
||||
if !vx.CanInterface() || !vy.CanInterface() {
|
||||
help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
|
||||
var name string
|
||||
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
|
||||
// Named type with unexported fields.
|
||||
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
|
||||
if _, ok := reflect.New(t).Interface().(error); ok {
|
||||
help = "consider using cmpopts.EquateErrors to compare error values"
|
||||
}
|
||||
} else {
|
||||
// Unnamed type with unexported fields. Derive PkgPath from field.
|
||||
var pkgPath string
|
||||
for i := 0; i < t.NumField() && pkgPath == ""; i++ {
|
||||
pkgPath = t.Field(i).PkgPath
|
||||
}
|
||||
name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
|
||||
}
|
||||
panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
|
||||
}
|
||||
|
||||
panic("not reachable")
|
||||
}
|
||||
|
||||
// identRx represents a valid identifier according to the Go specification.
|
||||
const identRx = `[_\p{L}][_\p{L}\p{N}]*`
|
||||
|
||||
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
|
||||
|
||||
// Transformer returns an Option that applies a transformation function that
|
||||
// converts values of a certain type into that of another.
|
||||
//
|
||||
// The transformer f must be a function "func(T) R" that converts values of
|
||||
// type T to those of type R and is implicitly filtered to input values
|
||||
// assignable to T. The transformer must not mutate T in any way.
|
||||
//
|
||||
// To help prevent some cases of infinite recursive cycles applying the
|
||||
// same transform to the output of itself (e.g., in the case where the
|
||||
// input and output types are the same), an implicit filter is added such that
|
||||
// a transformer is applicable only if that exact transformer is not already
|
||||
// in the tail of the Path since the last non-Transform step.
|
||||
// For situations where the implicit filter is still insufficient,
|
||||
// consider using cmpopts.AcyclicTransformer, which adds a filter
|
||||
// to prevent the transformer from being recursively applied upon itself.
|
||||
//
|
||||
// The name is a user provided label that is used as the Transform.Name in the
|
||||
// transformation PathStep (and eventually shown in the Diff output).
|
||||
// The name must be a valid identifier or qualified identifier in Go syntax.
|
||||
// If empty, an arbitrary name is used.
|
||||
func Transformer(name string, f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid transformer function: %T", f))
|
||||
}
|
||||
if name == "" {
|
||||
name = function.NameOf(v)
|
||||
if !identsRx.MatchString(name) {
|
||||
name = "λ" // Lambda-symbol as placeholder name
|
||||
}
|
||||
} else if !identsRx.MatchString(name) {
|
||||
panic(fmt.Sprintf("invalid name: %q", name))
|
||||
}
|
||||
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
tr.typ = ti
|
||||
}
|
||||
return tr
|
||||
}
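A common Transformer canonicalizes a value before comparison, for example sorting a slice copy so that element order is ignored. A hedged sketch along the lines of the package documentation:

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// "Sort" becomes the Transform.Name shown in any reported Path.
	sortInts := cmp.Transformer("Sort", func(in []int) []int {
		out := append([]int(nil), in...) // copy: a Transformer must not mutate its input
		sort.Ints(out)
		return out
	})

	fmt.Println(cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3}, sortInts)) // true
	fmt.Println(cmp.Equal([]int{3, 1}, []int{1, 2, 3}, sortInts))    // false: different elements
}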
|
||||
|
||||
type transformer struct {
|
||||
core
|
||||
name string
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T) R
|
||||
}
|
||||
|
||||
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
|
||||
|
||||
func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
for i := len(s.curPath) - 1; i >= 0; i-- {
|
||||
if t, ok := s.curPath[i].(Transform); !ok {
|
||||
break // Hit most recent non-Transform step
|
||||
} else if tr == t.trans {
|
||||
return nil // Cannot directly use same Transform
|
||||
}
|
||||
}
|
||||
if tr.typ == nil || t.AssignableTo(tr.typ) {
|
||||
return tr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
|
||||
step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
|
||||
vvx := s.callTRFunc(tr.fnc, vx, step)
|
||||
vvy := s.callTRFunc(tr.fnc, vy, step)
|
||||
step.vx, step.vy = vvx, vvy
|
||||
s.compareAny(step)
|
||||
}
|
||||
|
||||
func (tr transformer) String() string {
|
||||
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
|
||||
}
|
||||
|
||||
// Comparer returns an Option that determines whether two values are equal
|
||||
// to each other.
|
||||
//
|
||||
// The comparer f must be a function "func(T, T) bool" and is implicitly
|
||||
// filtered to input values assignable to T. If T is an interface, it is
|
||||
// possible that f is called with two values of different concrete types that
|
||||
// both implement T.
|
||||
//
|
||||
// The equality function must be:
|
||||
// - Symmetric: equal(x, y) == equal(y, x)
|
||||
// - Deterministic: equal(x, y) == equal(x, y)
|
||||
// - Pure: equal(x, y) does not modify x or y
|
||||
func Comparer(f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid comparer function: %T", f))
|
||||
}
|
||||
cm := &comparer{fnc: v}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
cm.typ = ti
|
||||
}
|
||||
return cm
|
||||
}
|
||||
|
||||
type comparer struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
}
|
||||
|
||||
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
|
||||
|
||||
func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
if cm.typ == nil || t.AssignableTo(cm.typ) {
|
||||
return cm
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
|
||||
eq := s.callTTBFunc(cm.fnc, vx, vy)
|
||||
s.report(eq, reportByFunc)
|
||||
}
|
||||
|
||||
func (cm comparer) String() string {
|
||||
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
|
||||
}
|
||||
|
||||
// Exporter returns an Option that specifies whether Equal is allowed to
|
||||
// introspect into the unexported fields of certain struct types.
|
||||
//
|
||||
// Users of this option must understand that comparing on unexported fields
|
||||
// from external packages is not safe since changes in the internal
|
||||
// implementation of some external package may cause the result of Equal
|
||||
// to unexpectedly change. However, it may be valid to use this option on types
|
||||
// defined in an internal package where the semantic meaning of an unexported
|
||||
// field is in the control of the user.
|
||||
//
|
||||
// In many cases, a custom Comparer should be used instead that defines
|
||||
// equality as a function of the public API of a type rather than the underlying
|
||||
// unexported implementation.
|
||||
//
|
||||
// For example, the reflect.Type documentation defines equality to be determined
|
||||
// by the == operator on the interface (essentially performing a shallow pointer
|
||||
// comparison) and most attempts to compare *regexp.Regexp types are interested
|
||||
// in only checking that the regular expression strings are equal.
|
||||
// Both of these are accomplished using Comparers:
|
||||
//
|
||||
// Comparer(func(x, y reflect.Type) bool { return x == y })
|
||||
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
|
||||
//
|
||||
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
|
||||
// all unexported fields on specified struct types.
|
||||
func Exporter(f func(reflect.Type) bool) Option {
|
||||
if !supportExporters {
|
||||
panic("Exporter is not supported on purego builds")
|
||||
}
|
||||
return exporter(f)
|
||||
}
|
||||
|
||||
type exporter func(reflect.Type) bool
|
||||
|
||||
func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// AllowUnexported returns an Options that allows Equal to forcibly introspect
|
||||
// unexported fields of the specified struct types.
|
||||
//
|
||||
// See Exporter for the proper use of this option.
|
||||
func AllowUnexported(types ...interface{}) Option {
|
||||
m := make(map[reflect.Type]bool)
|
||||
for _, typ := range types {
|
||||
t := reflect.TypeOf(typ)
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("invalid struct type: %T", typ))
|
||||
}
|
||||
m[t] = true
|
||||
}
|
||||
return exporter(func(t reflect.Type) bool { return m[t] })
|
||||
}
|
||||
|
||||
// Result represents the comparison result for a single node and
|
||||
// is provided by cmp when calling Report (see Reporter).
|
||||
type Result struct {
|
||||
_ [0]func() // Make Result incomparable
|
||||
flags resultFlags
|
||||
}
|
||||
|
||||
// Equal reports whether the node was determined to be equal or not.
|
||||
// As a special case, ignored nodes are considered equal.
|
||||
func (r Result) Equal() bool {
|
||||
return r.flags&(reportEqual|reportByIgnore) != 0
|
||||
}
|
||||
|
||||
// ByIgnore reports whether the node is equal because it was ignored.
|
||||
// This never reports true if Equal reports false.
|
||||
func (r Result) ByIgnore() bool {
|
||||
return r.flags&reportByIgnore != 0
|
||||
}
|
||||
|
||||
// ByMethod reports whether the Equal method determined equality.
|
||||
func (r Result) ByMethod() bool {
|
||||
return r.flags&reportByMethod != 0
|
||||
}
|
||||
|
||||
// ByFunc reports whether a Comparer function determined equality.
|
||||
func (r Result) ByFunc() bool {
|
||||
return r.flags&reportByFunc != 0
|
||||
}
|
||||
|
||||
// ByCycle reports whether a reference cycle was detected.
|
||||
func (r Result) ByCycle() bool {
|
||||
return r.flags&reportByCycle != 0
|
||||
}
|
||||
|
||||
type resultFlags uint
|
||||
|
||||
const (
|
||||
_ resultFlags = (1 << iota) / 2
|
||||
|
||||
reportEqual
|
||||
reportUnequal
|
||||
reportByIgnore
|
||||
reportByMethod
|
||||
reportByFunc
|
||||
reportByCycle
|
||||
)
|
||||
|
||||
// Reporter is an Option that can be passed to Equal. When Equal traverses
|
||||
// the value trees, it calls PushStep as it descends into each node in the
|
||||
// tree and PopStep as it ascends out of the node. The leaves of the tree are
|
||||
// either compared (determined to be equal or not equal) or ignored and reported
|
||||
// as such by calling the Report method.
|
||||
func Reporter(r interface {
|
||||
// PushStep is called when a tree-traversal operation is performed.
|
||||
// The PathStep itself is only valid until the step is popped.
|
||||
// The PathStep.Values are valid for the duration of the entire traversal
|
||||
// and must not be mutated.
|
||||
//
|
||||
// Equal always calls PushStep at the start to provide an operation-less
|
||||
// PathStep used to report the root values.
|
||||
//
|
||||
// Within a slice, the exact set of inserted, removed, or modified elements
|
||||
// is unspecified and may change in future implementations.
|
||||
// The entries of a map are iterated through in an unspecified order.
|
||||
PushStep(PathStep)
|
||||
|
||||
// Report is called exactly once on leaf nodes to report whether the
|
||||
// comparison identified the node as equal, unequal, or ignored.
|
||||
// A leaf node is one that is immediately preceded by and followed by
|
||||
// a pair of PushStep and PopStep calls.
|
||||
Report(Result)
|
||||
|
||||
// PopStep ascends back up the value tree.
|
||||
// There is always a matching pop call for every push call.
|
||||
PopStep()
|
||||
}) Option {
|
||||
return reporter{r}
|
||||
}
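A Reporter is typically a small type that records the path to every unequal leaf. A hedged sketch modeled on the pattern the package documentation suggests; the diffReporter name is illustrative:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter collects the paths of all unequal leaves it is told about.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v: %+v != %+v", r.path, vx, vy))
	}
}

func main() {
	var r diffReporter
	cmp.Equal([]int{1, 2, 3}, []int{1, 0, 3}, cmp.Reporter(&r))
	for _, d := range r.diffs {
		fmt.Println(d) // e.g., the slice-index-1 leaf: 2 != 0
	}
}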
|
||||
|
||||
type reporter struct{ reporterIface }
|
||||
type reporterIface interface {
|
||||
PushStep(PathStep)
|
||||
Report(Result)
|
||||
PopStep()
|
||||
}
|
||||
|
||||
func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// normalizeOption normalizes the input options such that all Options groups
|
||||
// are flattened and groups with a single element are reduced to that element.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func normalizeOption(src Option) Option {
|
||||
switch opts := flattenOptions(nil, Options{src}); len(opts) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return opts[0]
|
||||
default:
|
||||
return opts
|
||||
}
|
||||
}
|
||||
|
||||
// flattenOptions copies all options in src to dst as a flat list.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func flattenOptions(dst, src Options) Options {
|
||||
for _, opt := range src {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
continue
|
||||
case Options:
|
||||
dst = flattenOptions(dst, opt)
|
||||
case coreOption:
|
||||
dst = append(dst, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid option type: %T", opt))
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
380
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
Normal file
|
@@ -0,0 +1,380 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// Path is a list of PathSteps describing the sequence of operations to get
|
||||
// from some root type to the current position in the value tree.
|
||||
// The first Path element is always an operation-less PathStep that exists
|
||||
// simply to identify the initial type.
|
||||
//
|
||||
// When traversing structs with embedded structs, the embedded struct will
|
||||
// always be accessed as a field before traversing the fields of the
|
||||
// embedded struct themselves. That is, an exported field from the
|
||||
// embedded struct will never be accessed directly from the parent struct.
|
||||
type Path []PathStep
|
||||
|
||||
// PathStep is a union-type for specific operations to traverse
|
||||
// a value's tree structure. Users of this package never need to implement
|
||||
// these types as values of this type will be returned by this package.
|
||||
//
|
||||
// Implementations of this interface are
|
||||
// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
|
||||
type PathStep interface {
|
||||
String() string
|
||||
|
||||
// Type is the resulting type after performing the path step.
|
||||
Type() reflect.Type
|
||||
|
||||
// Values is the resulting values after performing the path step.
|
||||
// The type of each valid value is guaranteed to be identical to Type.
|
||||
//
|
||||
// In some cases, one or both may be invalid or have restrictions:
|
||||
// - For StructField, both are not interface-able if the current field
|
||||
// is unexported and the struct type is not explicitly permitted by
|
||||
// an Exporter to traverse unexported fields.
|
||||
// - For SliceIndex, one may be invalid if an element is missing from
|
||||
// either the x or y slice.
|
||||
// - For MapIndex, one may be invalid if an entry is missing from
|
||||
// either the x or y map.
|
||||
//
|
||||
// The provided values must not be mutated.
|
||||
Values() (vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
var (
|
||||
_ PathStep = StructField{}
|
||||
_ PathStep = SliceIndex{}
|
||||
_ PathStep = MapIndex{}
|
||||
_ PathStep = Indirect{}
|
||||
_ PathStep = TypeAssertion{}
|
||||
_ PathStep = Transform{}
|
||||
)
|
||||
|
||||
func (pa *Path) push(s PathStep) {
|
||||
*pa = append(*pa, s)
|
||||
}
|
||||
|
||||
func (pa *Path) pop() {
|
||||
*pa = (*pa)[:len(*pa)-1]
|
||||
}
|
||||
|
||||
// Last returns the last PathStep in the Path.
|
||||
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
|
||||
func (pa Path) Last() PathStep {
|
||||
return pa.Index(-1)
|
||||
}
|
||||
|
||||
// Index returns the ith step in the Path and supports negative indexing.
|
||||
// A negative index starts counting from the tail of the Path such that -1
|
||||
// refers to the last step, -2 refers to the second-to-last step, and so on.
|
||||
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
|
||||
func (pa Path) Index(i int) PathStep {
|
||||
if i < 0 {
|
||||
i = len(pa) + i
|
||||
}
|
||||
if i < 0 || i >= len(pa) {
|
||||
return pathStep{}
|
||||
}
|
||||
return pa[i]
|
||||
}
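Path steps such as these are typically consumed through FilterPath; a short sketch, assuming the cmp package is imported and with the field name chosen arbitrarily:

// Ignore any value reached through a struct field named "Metadata".
// p.Last() returns the most recent PathStep; StructField exposes Name().
ignoreMetadata := cmp.FilterPath(func(p cmp.Path) bool {
	sf, ok := p.Last().(cmp.StructField)
	return ok && sf.Name() == "Metadata"
}, cmp.Ignore())
_ = ignoreMetadata // pass to cmp.Equal or cmp.Diff as an Option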
|
||||
|
||||
// String returns the simplified path to a node.
|
||||
// The simplified path only contains struct field accesses.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// MyMap.MySlices.MyField
|
||||
func (pa Path) String() string {
|
||||
var ss []string
|
||||
for _, s := range pa {
|
||||
if _, ok := s.(StructField); ok {
|
||||
ss = append(ss, s.String())
|
||||
}
|
||||
}
|
||||
return strings.TrimPrefix(strings.Join(ss, ""), ".")
|
||||
}
|
||||
|
||||
// GoString returns the path to a specific node using Go syntax.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
||||
func (pa Path) GoString() string {
|
||||
var ssPre, ssPost []string
|
||||
var numIndirect int
|
||||
for i, s := range pa {
|
||||
var nextStep PathStep
|
||||
if i+1 < len(pa) {
|
||||
nextStep = pa[i+1]
|
||||
}
|
||||
switch s := s.(type) {
|
||||
case Indirect:
|
||||
numIndirect++
|
||||
pPre, pPost := "(", ")"
|
||||
switch nextStep.(type) {
|
||||
case Indirect:
|
||||
continue // Next step is indirection, so let them batch up
|
||||
case StructField:
|
||||
numIndirect-- // Automatic indirection on struct fields
|
||||
case nil:
|
||||
pPre, pPost = "", "" // Last step; no need for parenthesis
|
||||
}
|
||||
if numIndirect > 0 {
|
||||
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
|
||||
ssPost = append(ssPost, pPost)
|
||||
}
|
||||
numIndirect = 0
|
||||
continue
|
||||
case Transform:
|
||||
ssPre = append(ssPre, s.trans.name+"(")
|
||||
ssPost = append(ssPost, ")")
|
||||
continue
|
||||
}
|
||||
ssPost = append(ssPost, s.String())
|
||||
}
|
||||
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
|
||||
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
|
||||
}
|
||||
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
|
||||
}
|
||||
|
||||
type pathStep struct {
|
||||
typ reflect.Type
|
||||
vx, vy reflect.Value
|
||||
}
|
||||
|
||||
func (ps pathStep) Type() reflect.Type { return ps.typ }
|
||||
func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
|
||||
func (ps pathStep) String() string {
|
||||
if ps.typ == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
s := value.TypeString(ps.typ, false)
|
||||
if s == "" || strings.ContainsAny(s, "{}\n") {
|
||||
return "root" // Type too simple or complex to print
|
||||
}
|
||||
return fmt.Sprintf("{%s}", s)
|
||||
}
|
||||
|
||||
// StructField represents a struct field access on a field called Name.
|
||||
type StructField struct{ *structField }
|
||||
type structField struct {
|
||||
pathStep
|
||||
name string
|
||||
idx int
|
||||
|
||||
// These fields are used for forcibly accessing an unexported field.
|
||||
// pvx, pvy, and field are only valid if unexported is true.
|
||||
unexported bool
|
||||
mayForce bool // Forcibly allow visibility
|
||||
paddr bool // Was parent addressable?
|
||||
pvx, pvy reflect.Value // Parent values (always addressable)
|
||||
field reflect.StructField // Field information
|
||||
}
|
||||
|
||||
func (sf StructField) Type() reflect.Type { return sf.typ }
|
||||
func (sf StructField) Values() (vx, vy reflect.Value) {
|
||||
if !sf.unexported {
|
||||
return sf.vx, sf.vy // CanInterface reports true
|
||||
}
|
||||
|
||||
// Forcibly obtain read-write access to an unexported struct field.
|
||||
if sf.mayForce {
|
||||
vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
|
||||
vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
|
||||
return vx, vy // CanInterface reports true
|
||||
}
|
||||
return sf.vx, sf.vy // CanInterface reports false
|
||||
}
|
||||
func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
|
||||
|
||||
// Name is the field name.
|
||||
func (sf StructField) Name() string { return sf.name }
|
||||
|
||||
// Index is the index of the field in the parent struct type.
|
||||
// See reflect.Type.Field.
|
||||
func (sf StructField) Index() int { return sf.idx }
|
||||
|
||||
// SliceIndex is an index operation on a slice or array at some index Key.
|
||||
type SliceIndex struct{ *sliceIndex }
|
||||
type sliceIndex struct {
|
||||
pathStep
|
||||
xkey, ykey int
|
||||
isSlice bool // False for reflect.Array
|
||||
}
|
||||
|
||||
func (si SliceIndex) Type() reflect.Type { return si.typ }
|
||||
func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
|
||||
func (si SliceIndex) String() string {
|
||||
switch {
|
||||
case si.xkey == si.ykey:
|
||||
return fmt.Sprintf("[%d]", si.xkey)
|
||||
case si.ykey == -1:
|
||||
// [5->?] means "I don't know where X[5] went"
|
||||
return fmt.Sprintf("[%d->?]", si.xkey)
|
||||
case si.xkey == -1:
|
||||
// [?->3] means "I don't know where Y[3] came from"
|
||||
return fmt.Sprintf("[?->%d]", si.ykey)
|
||||
default:
|
||||
// [5->3] means "X[5] moved to Y[3]"
|
||||
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
|
||||
}
|
||||
}
|
||||
|
||||
// Key is the index key; it may return -1 if in a split state
|
||||
func (si SliceIndex) Key() int {
|
||||
if si.xkey != si.ykey {
|
||||
return -1
|
||||
}
|
||||
return si.xkey
|
||||
}
|
||||
|
||||
// SplitKeys are the indexes for indexing into slices in the
|
||||
// x and y values, respectively. These indexes may differ due to the
|
||||
// insertion or removal of an element in one of the slices, causing
|
||||
// all of the indexes to be shifted. If an index is -1, then that
|
||||
// indicates that the element does not exist in the associated slice.
|
||||
//
|
||||
// Key is guaranteed to return -1 if and only if the indexes returned
|
||||
// by SplitKeys are not the same. SplitKeys will never return -1 for
|
||||
// both indexes.
|
||||
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
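As an example of how SplitKeys is used, a filter can ignore elements that exist on only one side; a sketch, assuming the cmp package is imported:

// Ignore slice elements that have no counterpart in y (iy == -1),
// i.e. elements that appear to have been removed from x.
ignoreRemoved := cmp.FilterPath(func(p cmp.Path) bool {
	si, ok := p.Last().(cmp.SliceIndex)
	if !ok {
		return false
	}
	_, iy := si.SplitKeys()
	return iy == -1
}, cmp.Ignore())
_ = ignoreRemoved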
|
||||
|
||||
// MapIndex is an index operation on a map at some index Key.
|
||||
type MapIndex struct{ *mapIndex }
|
||||
type mapIndex struct {
|
||||
pathStep
|
||||
key reflect.Value
|
||||
}
|
||||
|
||||
func (mi MapIndex) Type() reflect.Type { return mi.typ }
|
||||
func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
|
||||
func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
|
||||
|
||||
// Key is the value of the map key.
|
||||
func (mi MapIndex) Key() reflect.Value { return mi.key }
|
||||
|
||||
// Indirect represents pointer indirection on the parent type.
|
||||
type Indirect struct{ *indirect }
|
||||
type indirect struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (in Indirect) Type() reflect.Type { return in.typ }
|
||||
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
|
||||
func (in Indirect) String() string { return "*" }
|
||||
|
||||
// TypeAssertion represents a type assertion on an interface.
|
||||
type TypeAssertion struct{ *typeAssertion }
|
||||
type typeAssertion struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
|
||||
|
||||
// Transform is a transformation from the parent type to the current type.
|
||||
type Transform struct{ *transform }
|
||||
type transform struct {
|
||||
pathStep
|
||||
trans *transformer
|
||||
}
|
||||
|
||||
func (tf Transform) Type() reflect.Type { return tf.typ }
|
||||
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
|
||||
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
|
||||
|
||||
// Name is the name of the Transformer.
|
||||
func (tf Transform) Name() string { return tf.trans.name }
|
||||
|
||||
// Func is the function pointer to the transformer function.
|
||||
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
|
||||
|
||||
// Option returns the originally constructed Transformer option.
|
||||
// The == operator can be used to detect the exact option used.
|
||||
func (tf Transform) Option() Option { return tf.trans }
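Because a Transform step carries the transformer's name, path filters can match on it; a brief sketch, assuming the cmp and strings packages are imported and with an arbitrary transformer name:

// A named transformer shows up in the Path as a Transform step.
trimSpace := cmp.Transformer("TrimSpace", strings.TrimSpace)

// Ignore differences that arise inside the output of that transformer.
ignoreTrimmed := cmp.FilterPath(func(p cmp.Path) bool {
	tf, ok := p.Last().(cmp.Transform)
	return ok && tf.Name() == "TrimSpace"
}, cmp.Ignore())
_, _ = trimSpace, ignoreTrimmed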
|
||||
|
||||
// pointerPath represents a dual-stack of pointers encountered when
|
||||
// recursively traversing the x and y values. This data structure supports
|
||||
// detection of cycles and determining whether the cycles are equal.
|
||||
// In Go, cycles can occur via pointers, slices, and maps.
|
||||
//
|
||||
// The pointerPath uses a map to represent a stack; where descension into a
|
||||
// pointer pushes the address onto the stack, and ascension from a pointer
|
||||
// pops the address from the stack. Thus, when traversing into a pointer from
|
||||
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
|
||||
// by checking whether the pointer has already been visited. The cycle detection
|
||||
// uses a separate stack for the x and y values.
|
||||
//
|
||||
// If a cycle is detected we need to determine whether the two pointers
|
||||
// should be considered equal. The definition of equality chosen by Equal
|
||||
// requires two graphs to have the same structure. To determine this, both the
|
||||
// x and y values must have a cycle where the previous pointers were also
|
||||
// encountered together as a pair.
|
||||
//
|
||||
// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
|
||||
// MapIndex with pointer information for the x and y values.
|
||||
// Suppose px and py are two pointers to compare, we then search the
|
||||
// Path for whether px was ever encountered in the Path history of x, and
|
||||
// similarly so with py. If either side has a cycle, the comparison is only
|
||||
// equal if both px and py have a cycle resulting from the same PathStep.
|
||||
//
|
||||
// Using a map as a stack is more performant as we can perform cycle detection
|
||||
// in O(1) instead of O(N) where N is len(Path).
|
||||
type pointerPath struct {
|
||||
// mx is keyed by x pointers, where the value is the associated y pointer.
|
||||
mx map[value.Pointer]value.Pointer
|
||||
// my is keyed by y pointers, where the value is the associated x pointer.
|
||||
my map[value.Pointer]value.Pointer
|
||||
}
|
||||
|
||||
func (p *pointerPath) Init() {
|
||||
p.mx = make(map[value.Pointer]value.Pointer)
|
||||
p.my = make(map[value.Pointer]value.Pointer)
|
||||
}
|
||||
|
||||
// Push indicates intent to descend into pointers vx and vy where
|
||||
// visited reports whether either has been seen before. If visited before,
|
||||
// equal reports whether both pointers were encountered together.
|
||||
// Pop must be called if and only if the pointers were never visited.
|
||||
//
|
||||
// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
|
||||
// and be non-nil.
|
||||
func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
|
||||
px := value.PointerOf(vx)
|
||||
py := value.PointerOf(vy)
|
||||
_, ok1 := p.mx[px]
|
||||
_, ok2 := p.my[py]
|
||||
if ok1 || ok2 {
|
||||
equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
|
||||
return equal, true
|
||||
}
|
||||
p.mx[px] = py
|
||||
p.my[py] = px
|
||||
return false, false
|
||||
}
|
||||
|
||||
// Pop ascends from pointers vx and vy.
|
||||
func (p pointerPath) Pop(vx, vy reflect.Value) {
|
||||
delete(p.mx, value.PointerOf(vx))
|
||||
delete(p.my, value.PointerOf(vy))
|
||||
}
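The paired-map idea generalizes beyond cmp's internals; a standalone sketch of the same cycle check over two linked lists (this is the technique, not the pointerPath implementation above):

type listNode struct{ next *listNode }

// cyclesMatch walks x and y in lockstep, recording each pointer pair.
// A revisited pointer is only considered equal if it was first seen
// together with the pointer it is now paired with.
func cyclesMatch(x, y *listNode, mx, my map[*listNode]*listNode) bool {
	if x == nil || y == nil {
		return x == nil && y == nil
	}
	if px, ok := mx[x]; ok {
		return px == y && my[y] == x // same pair as before?
	}
	if _, ok := my[y]; ok {
		return false // y was seen before, but with a different x
	}
	mx[x], my[y] = y, x
	defer func() { delete(mx, x); delete(my, y) }() // ascend: pop the pair
	return cyclesMatch(x.next, y.next, mx, my)
}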
|
||||
|
||||
// isExported reports whether the identifier is exported.
|
||||
func isExported(id string) bool {
|
||||
r, _ := utf8.DecodeRuneInString(id)
|
||||
return unicode.IsUpper(r)
|
||||
}
54 vendor/github.com/google/go-cmp/cmp/report.go generated vendored Normal file
@@ -0,0 +1,54 @@
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
// defaultReporter implements the reporter interface.
|
||||
//
|
||||
// As Equal serially calls the PushStep, Report, and PopStep methods, the
|
||||
// defaultReporter constructs a tree-based representation of the compared value
|
||||
// and the result of each comparison (see valueNode).
|
||||
//
|
||||
// When the String method is called, the FormatDiff method transforms the
|
||||
// valueNode tree into a textNode tree, which is a tree-based representation
|
||||
// of the textual output (see textNode).
|
||||
//
|
||||
// Lastly, the textNode.String method produces the final report as a string.
|
||||
type defaultReporter struct {
|
||||
root *valueNode
|
||||
curr *valueNode
|
||||
}
|
||||
|
||||
func (r *defaultReporter) PushStep(ps PathStep) {
|
||||
r.curr = r.curr.PushStep(ps)
|
||||
if r.root == nil {
|
||||
r.root = r.curr
|
||||
}
|
||||
}
|
||||
func (r *defaultReporter) Report(rs Result) {
|
||||
r.curr.Report(rs)
|
||||
}
|
||||
func (r *defaultReporter) PopStep() {
|
||||
r.curr = r.curr.PopStep()
|
||||
}
|
||||
|
||||
// String provides a full report of the differences detected as a structured
|
||||
// literal in pseudo-Go syntax. String may only be called after the entire tree
|
||||
// has been traversed.
|
||||
func (r *defaultReporter) String() string {
|
||||
assert(r.root != nil && r.curr == nil)
|
||||
if r.root.NumDiff == 0 {
|
||||
return ""
|
||||
}
|
||||
ptrs := new(pointerReferences)
|
||||
text := formatOptions{}.FormatDiff(r.root, ptrs)
|
||||
resolveReferences(text)
|
||||
return text.String()
|
||||
}
|
||||
|
||||
func assert(ok bool) {
|
||||
if !ok {
|
||||
panic("assertion failure")
|
||||
}
|
||||
}
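The same PushStep/Report/PopStep protocol is exposed to user code through the Reporter option; a minimal custom reporter sketch, assuming the cmp and fmt packages are imported (type and field names here are illustrative):

// diffReporter records a line for every leaf comparison that was unequal.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v", r.path, vx, vy))
	}
}

func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

// Usage: cmp.Equal(x, y, cmp.Reporter(&r)) populates r.diffs.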
433 vendor/github.com/google/go-cmp/cmp/report_compare.go generated vendored Normal file
@@ -0,0 +1,433 @@
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// numContextRecords is the number of surrounding equal records to print.
|
||||
const numContextRecords = 2
|
||||
|
||||
type diffMode byte
|
||||
|
||||
const (
|
||||
diffUnknown diffMode = 0
|
||||
diffIdentical diffMode = ' '
|
||||
diffRemoved diffMode = '-'
|
||||
diffInserted diffMode = '+'
|
||||
)
|
||||
|
||||
type typeMode int
|
||||
|
||||
const (
|
||||
// emitType always prints the type.
|
||||
emitType typeMode = iota
|
||||
// elideType never prints the type.
|
||||
elideType
|
||||
// autoType prints the type only for composite kinds
|
||||
// (i.e., structs, slices, arrays, and maps).
|
||||
autoType
|
||||
)
|
||||
|
||||
type formatOptions struct {
|
||||
// DiffMode controls the output mode of FormatDiff.
|
||||
//
|
||||
// If diffUnknown, then produce a diff of the x and y values.
|
||||
// If diffIdentical, then emit values as if they were equal.
|
||||
// If diffRemoved, then only emit x values (ignoring y values).
|
||||
// If diffInserted, then only emit y values (ignoring x values).
|
||||
DiffMode diffMode
|
||||
|
||||
// TypeMode controls whether to print the type for the current node.
|
||||
//
|
||||
// As a general rule of thumb, we always print the type of the next node
|
||||
// after an interface, and always elide the type of the next node after
|
||||
// a slice or map node.
|
||||
TypeMode typeMode
|
||||
|
||||
// formatValueOptions are options specific to printing reflect.Values.
|
||||
formatValueOptions
|
||||
}
|
||||
|
||||
func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
|
||||
opts.DiffMode = d
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
|
||||
opts.TypeMode = t
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithVerbosity(level int) formatOptions {
|
||||
opts.VerbosityLevel = level
|
||||
opts.LimitVerbosity = true
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) verbosity() uint {
|
||||
switch {
|
||||
case opts.VerbosityLevel < 0:
|
||||
return 0
|
||||
case opts.VerbosityLevel > 16:
|
||||
return 16 // some reasonable maximum to avoid shift overflow
|
||||
default:
|
||||
return uint(opts.VerbosityLevel)
|
||||
}
|
||||
}
|
||||
|
||||
const maxVerbosityPreset = 6
|
||||
|
||||
// verbosityPreset modifies the verbosity settings given an index
|
||||
// between 0 and maxVerbosityPreset, inclusive.
|
||||
func verbosityPreset(opts formatOptions, i int) formatOptions {
|
||||
opts.VerbosityLevel = int(opts.verbosity()) + 2*i
|
||||
if i > 0 {
|
||||
opts.AvoidStringer = true
|
||||
}
|
||||
if i >= maxVerbosityPreset {
|
||||
opts.PrintAddresses = true
|
||||
opts.QualifiedNames = true
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// FormatDiff converts a valueNode tree into a textNode tree, where the latter
// is a textual representation of the differences detected in the former.
|
||||
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
opts = opts.WithVerbosity(1)
|
||||
} else if opts.verbosity() < 3 {
|
||||
opts = opts.WithVerbosity(3)
|
||||
}
|
||||
|
||||
// Check whether we have specialized formatting for this node.
|
||||
// This is not necessary, but helpful for producing more readable outputs.
|
||||
if opts.CanFormatDiffSlice(v) {
|
||||
return opts.FormatDiffSlice(v)
|
||||
}
|
||||
|
||||
var parentKind reflect.Kind
|
||||
if v.parent != nil && v.parent.TransformerName == "" {
|
||||
parentKind = v.parent.Type.Kind()
|
||||
}
|
||||
|
||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
||||
// As a special case, treat equal []byte as leaf nodes.
|
||||
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
|
||||
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
|
||||
if v.MaxDepth == 0 || isEqualBytes {
|
||||
switch opts.DiffMode {
|
||||
case diffUnknown, diffIdentical:
|
||||
// Format Equal.
|
||||
if v.NumDiff == 0 {
|
||||
outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
if v.NumIgnored > 0 && v.NumSame == 0 {
|
||||
return textEllipsis
|
||||
} else if outx.Len() < outy.Len() {
|
||||
return outx
|
||||
} else {
|
||||
return outy
|
||||
}
|
||||
}
|
||||
|
||||
// Format unequal.
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
var list textList
|
||||
outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
|
||||
outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: '-', Value: outx})
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: '+', Value: outy})
|
||||
}
|
||||
return opts.WithTypeMode(emitType).FormatType(v.Type, list)
|
||||
case diffRemoved:
|
||||
return opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
case diffInserted:
|
||||
return opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
}
|
||||
|
||||
// Register slice element to support cycle detection.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
|
||||
}
|
||||
|
||||
// Descend into the child value node.
|
||||
if v.TransformerName != "" {
|
||||
out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
|
||||
return opts.FormatType(v.Type, out)
|
||||
} else {
|
||||
switch k := v.Type.Kind(); k {
|
||||
case reflect.Struct, reflect.Array, reflect.Slice:
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Map:
|
||||
// Register map to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Ptr:
|
||||
// Register pointer to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.FormatDiff(v.Value, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
case reflect.Interface:
|
||||
out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v cannot have children", k))
|
||||
}
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
|
||||
// Derive record name based on the data structure kind.
|
||||
var name string
|
||||
var formatKey func(reflect.Value) string
|
||||
switch k {
|
||||
case reflect.Struct:
|
||||
name = "field"
|
||||
opts = opts.WithTypeMode(autoType)
|
||||
formatKey = func(v reflect.Value) string { return v.String() }
|
||||
case reflect.Slice, reflect.Array:
|
||||
name = "element"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(reflect.Value) string { return "" }
|
||||
case reflect.Map:
|
||||
name = "entry"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
|
||||
}
|
||||
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
} else {
|
||||
maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
// Handle unification.
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical, diffRemoved, diffInserted:
|
||||
var list textList
|
||||
var deferredEllipsis bool // Add final "..." to indicate records were dropped
|
||||
for _, r := range recs {
|
||||
if len(list) == maxLen {
|
||||
deferredEllipsis = true
|
||||
break
|
||||
}
|
||||
|
||||
// Elide struct fields that are zero value.
|
||||
if k == reflect.Struct {
|
||||
var isZero bool
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical:
|
||||
isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
|
||||
case diffRemoved:
|
||||
isZero = r.Value.ValueX.IsZero()
|
||||
case diffInserted:
|
||||
isZero = r.Value.ValueY.IsZero()
|
||||
}
|
||||
if isZero {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Elide ignored nodes.
|
||||
if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
|
||||
deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
|
||||
if !deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
continue
|
||||
}
|
||||
if out := opts.FormatDiff(r.Value, ptrs); out != nil {
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
}
|
||||
}
|
||||
if deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case diffUnknown:
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
|
||||
// Handle differencing.
|
||||
var numDiffs int
|
||||
var list textList
|
||||
var keys []reflect.Value // invariant: len(list) == len(keys)
|
||||
groups := coalesceAdjacentRecords(name, recs)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle equal records.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing records to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numLo++
|
||||
}
|
||||
for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
|
||||
numHi++ // Avoid pointless coalescing of a single equal record
|
||||
}
|
||||
|
||||
// Format the equal values.
|
||||
for _, r := range recs[:numLo] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
for _, r := range recs[numEqual-numHi : numEqual] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
recs = recs[numEqual:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal records.
|
||||
for _, r := range recs[:ds.NumDiff()] {
|
||||
switch {
|
||||
case opts.CanFormatDiffSlice(r.Value):
|
||||
out := opts.FormatDiffSlice(r.Value)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
case r.Value.NumChildren == r.Value.MaxDepth:
|
||||
outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i)
|
||||
outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
default:
|
||||
out := opts.FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
}
|
||||
recs = recs[ds.NumDiff():]
|
||||
numDiffs += ds.NumDiff()
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(len(recs) == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
assert(len(list) == len(keys))
|
||||
|
||||
// For maps, the default formatting logic uses fmt.Stringer which may
|
||||
// produce ambiguous output. Avoid calling String to disambiguate.
|
||||
if k == reflect.Map {
|
||||
var ambiguous bool
|
||||
seenKeys := map[string]reflect.Value{}
|
||||
for i, currKey := range keys {
|
||||
if currKey.IsValid() {
|
||||
strKey := list[i].Key
|
||||
prevKey, seen := seenKeys[strKey]
|
||||
if seen && prevKey.CanInterface() && currKey.CanInterface() {
|
||||
ambiguous = prevKey.Interface() != currKey.Interface()
|
||||
if ambiguous {
|
||||
break
|
||||
}
|
||||
}
|
||||
seenKeys[strKey] = currKey
|
||||
}
|
||||
}
|
||||
if ambiguous {
|
||||
for i, k := range keys {
|
||||
if k.IsValid() {
|
||||
list[i].Key = formatMapKey(k, true, ptrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
}
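The bit-shift limits used above grow geometrically with verbosity; a tiny hypothetical helper mirroring the two formulas makes the sequences explicit:

// recordLimits reports how many records are shown at a given verbosity:
// identical mode yields 0, 4, 8, 16, 32, ... and diff mode 2, 4, 8, 16, ...
func recordLimits(verbosity uint) (identical, diff int) {
	identical = int(((1 << verbosity) >> 1) << 2)
	diff = int((1 << verbosity) << 1)
	return identical, diff
}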
|
||||
|
||||
// coalesceAdjacentRecords coalesces the list of records into groups of
|
||||
// adjacent equal, or unequal counts.
|
||||
func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
|
||||
var prevCase int // Arbitrary index into which case last occurred
|
||||
lastStats := func(i int) *diffStats {
|
||||
if prevCase != i {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevCase = i
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, r := range recs {
|
||||
switch rv := r.Value; {
|
||||
case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
|
||||
lastStats(1).NumIgnored++
|
||||
case rv.NumDiff == 0:
|
||||
lastStats(1).NumIdentical++
|
||||
case rv.NumDiff > 0 && !rv.ValueY.IsValid():
|
||||
lastStats(2).NumRemoved++
|
||||
case rv.NumDiff > 0 && !rv.ValueX.IsValid():
|
||||
lastStats(2).NumInserted++
|
||||
default:
|
||||
lastStats(2).NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
264 vendor/github.com/google/go-cmp/cmp/report_references.go generated vendored Normal file
@@ -0,0 +1,264 @@
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
const (
|
||||
pointerDelimPrefix = "⟪"
|
||||
pointerDelimSuffix = "⟫"
|
||||
)
|
||||
|
||||
// formatPointer prints the address of the pointer.
|
||||
func formatPointer(p value.Pointer, withDelims bool) string {
|
||||
v := p.Uintptr()
|
||||
if flags.Deterministic {
|
||||
v = 0xdeadf00f // Only used for stable testing purposes
|
||||
}
|
||||
if withDelims {
|
||||
return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
|
||||
}
|
||||
return formatHex(uint64(v))
|
||||
}
|
||||
|
||||
// pointerReferences is a stack of pointers visited so far.
|
||||
type pointerReferences [][2]value.Pointer
|
||||
|
||||
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
|
||||
if deref && vx.IsValid() {
|
||||
vx = vx.Addr()
|
||||
}
|
||||
if deref && vy.IsValid() {
|
||||
vy = vy.Addr()
|
||||
}
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
|
||||
case diffRemoved:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
|
||||
case diffInserted:
|
||||
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
|
||||
}
|
||||
*ps = append(*ps, pp)
|
||||
return pp
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
|
||||
p = value.PointerOf(v)
|
||||
for _, pp := range *ps {
|
||||
if p == pp[0] || p == pp[1] {
|
||||
return p, true
|
||||
}
|
||||
}
|
||||
*ps = append(*ps, [2]value.Pointer{p, p})
|
||||
return p, false
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Pop() {
|
||||
*ps = (*ps)[:len(*ps)-1]
|
||||
}
|
||||
|
||||
// trunkReferences is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for either pointer in a pair of references.
|
||||
type trunkReferences struct{ pp [2]value.Pointer }
|
||||
|
||||
// trunkReference is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for the given pointer reference.
|
||||
type trunkReference struct{ p value.Pointer }
|
||||
|
||||
// leafReference is metadata for a textNode indicating that the value is
|
||||
// truncated as it refers to another part of the tree (i.e., a trunk).
|
||||
type leafReference struct{ p value.Pointer }
|
||||
|
||||
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
|
||||
switch {
|
||||
case pp[0].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
|
||||
case pp[1].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
case pp[0] == pp[1]:
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
default:
|
||||
return &textWrap{Value: s, Metadata: trunkReferences{pp}}
|
||||
}
|
||||
}
|
||||
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
|
||||
}
|
||||
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
|
||||
out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
|
||||
}
|
||||
|
||||
// resolveReferences walks the textNode tree searching for any leaf reference
|
||||
// metadata and resolves each against the corresponding trunk references.
|
||||
// Since pointer addresses in memory are not particularly readable to the user,
|
||||
// it replaces each pointer value with an arbitrary and unique reference ID.
|
||||
func resolveReferences(s textNode) {
|
||||
var walkNodes func(textNode, func(textNode))
|
||||
walkNodes = func(s textNode, f func(textNode)) {
|
||||
f(s)
|
||||
switch s := s.(type) {
|
||||
case *textWrap:
|
||||
walkNodes(s.Value, f)
|
||||
case textList:
|
||||
for _, r := range s {
|
||||
walkNodes(r.Value, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect all trunks and leaves with reference metadata.
|
||||
var trunks, leaves []*textWrap
|
||||
walkNodes(s, func(s textNode) {
|
||||
if s, ok := s.(*textWrap); ok {
|
||||
switch s.Metadata.(type) {
|
||||
case leafReference:
|
||||
leaves = append(leaves, s)
|
||||
case trunkReference, trunkReferences:
|
||||
trunks = append(trunks, s)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// No leaf references to resolve.
|
||||
if len(leaves) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Collect the set of all leaf references to resolve.
|
||||
leafPtrs := make(map[value.Pointer]bool)
|
||||
for _, leaf := range leaves {
|
||||
leafPtrs[leaf.Metadata.(leafReference).p] = true
|
||||
}
|
||||
|
||||
// Collect the set of trunk pointers that are always paired together.
|
||||
// This allows us to assign a single ID to both pointers for brevity.
|
||||
// If a pointer in a pair ever occurs by itself or as a different pair,
|
||||
// then the pair is broken.
|
||||
pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
|
||||
unpair := func(p value.Pointer) {
|
||||
if !pairedTrunkPtrs[p].IsNil() {
|
||||
pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
|
||||
}
|
||||
pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
unpair(p.p) // standalone pointer cannot be part of a pair
|
||||
case trunkReferences:
|
||||
p0, ok0 := pairedTrunkPtrs[p.pp[0]]
|
||||
p1, ok1 := pairedTrunkPtrs[p.pp[1]]
|
||||
switch {
|
||||
case !ok0 && !ok1:
|
||||
// Register the newly seen pair.
|
||||
pairedTrunkPtrs[p.pp[0]] = p.pp[1]
|
||||
pairedTrunkPtrs[p.pp[1]] = p.pp[0]
|
||||
case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
|
||||
// Exact pair already seen; do nothing.
|
||||
default:
|
||||
// Pair conflicts with some other pair; break all pairs.
|
||||
unpair(p.pp[0])
|
||||
unpair(p.pp[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Correlate each pointer referenced by leaves to a unique identifier,
|
||||
// and print the IDs for each trunk that matches those pointers.
|
||||
var nextID uint
|
||||
ptrIDs := make(map[value.Pointer]uint)
|
||||
newID := func() uint {
|
||||
id := nextID
|
||||
nextID++
|
||||
return id
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
if print := leafPtrs[p.p]; print {
|
||||
id, ok := ptrIDs[p.p]
|
||||
if !ok {
|
||||
id = newID()
|
||||
ptrIDs[p.p] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
}
|
||||
case trunkReferences:
|
||||
print0 := leafPtrs[p.pp[0]]
|
||||
print1 := leafPtrs[p.pp[1]]
|
||||
if print0 || print1 {
|
||||
id0, ok0 := ptrIDs[p.pp[0]]
|
||||
id1, ok1 := ptrIDs[p.pp[1]]
|
||||
isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
|
||||
if isPair {
|
||||
var id uint
|
||||
assert(ok0 == ok1) // must be seen together or not at all
|
||||
if ok0 {
|
||||
assert(id0 == id1) // must have the same ID
|
||||
id = id0
|
||||
} else {
|
||||
id = newID()
|
||||
ptrIDs[p.pp[0]] = id
|
||||
ptrIDs[p.pp[1]] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
} else {
|
||||
if print0 && !ok0 {
|
||||
id0 = newID()
|
||||
ptrIDs[p.pp[0]] = id0
|
||||
}
|
||||
if print1 && !ok1 {
|
||||
id1 = newID()
|
||||
ptrIDs[p.pp[1]] = id1
|
||||
}
|
||||
switch {
|
||||
case print0 && print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
|
||||
case print0:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
|
||||
case print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update all leaf references with the unique identifier.
|
||||
for _, leaf := range leaves {
|
||||
if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
|
||||
leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatReference(id uint) string {
|
||||
return fmt.Sprintf("ref#%d", id)
|
||||
}
|
||||
|
||||
func updateReferencePrefix(prefix, ref string) string {
|
||||
if prefix == "" {
|
||||
return pointerDelimPrefix + ref + pointerDelimSuffix
|
||||
}
|
||||
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
|
||||
return pointerDelimPrefix + ref + ": " + suffix
|
||||
}
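Putting the reference machinery together, comparing cyclic values through the public API causes the shared pointers to be rewritten as ref#N identifiers; a small hedged example (the exact rendering may vary between versions):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type ringNode struct {
	Name string
	Next *ringNode
}

// ring builds a circular list so that comparing two of them forces the
// reporter to resolve trunk and leaf pointer references.
func ring(names ...string) *ringNode {
	head := &ringNode{Name: names[0]}
	curr := head
	for _, n := range names[1:] {
		curr.Next = &ringNode{Name: n}
		curr = curr.Next
	}
	curr.Next = head
	return head
}

func main() {
	// The output marks the cyclic pointers with ⟪ref#N⟫ markers.
	fmt.Println(cmp.Diff(ring("a", "b"), ring("a", "c")))
}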
414 vendor/github.com/google/go-cmp/cmp/report_reflect.go generated vendored Normal file
@@ -0,0 +1,414 @@
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
var (
|
||||
anyType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
stringType = reflect.TypeOf((*string)(nil)).Elem()
|
||||
bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
|
||||
byteType = reflect.TypeOf((*byte)(nil)).Elem()
|
||||
)
|
||||
|
||||
type formatValueOptions struct {
|
||||
// AvoidStringer controls whether to avoid calling custom stringer
|
||||
// methods like error.Error or fmt.Stringer.String.
|
||||
AvoidStringer bool
|
||||
|
||||
// PrintAddresses controls whether to print the address of all pointers,
|
||||
// slice elements, and maps.
|
||||
PrintAddresses bool
|
||||
|
||||
// QualifiedNames controls whether FormatType uses the fully qualified name
|
||||
// (including the full package path as opposed to just the package name).
|
||||
QualifiedNames bool
|
||||
|
||||
// VerbosityLevel controls the amount of output to produce.
|
||||
// A higher value produces more output. A value of zero or lower produces
|
||||
// no output (represented using an ellipsis).
|
||||
// If LimitVerbosity is false, then the level is treated as infinite.
|
||||
VerbosityLevel int
|
||||
|
||||
// LimitVerbosity specifies that formatting should respect VerbosityLevel.
|
||||
LimitVerbosity bool
|
||||
}
|
||||
|
||||
// FormatType prints the type as if it were wrapping s.
|
||||
// This may return s as-is depending on the current type and TypeMode mode.
|
||||
func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
|
||||
// Check whether to emit the type or not.
|
||||
switch opts.TypeMode {
|
||||
case autoType:
|
||||
switch t.Kind() {
|
||||
case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
|
||||
if s.Equal(textNil) {
|
||||
return s
|
||||
}
|
||||
default:
|
||||
return s
|
||||
}
|
||||
if opts.DiffMode == diffIdentical {
|
||||
return s // elide type for identical nodes
|
||||
}
|
||||
case elideType:
|
||||
return s
|
||||
}
|
||||
|
||||
// Determine the type label, applying special handling for unnamed types.
|
||||
typeName := value.TypeString(t, opts.QualifiedNames)
|
||||
if t.Name() == "" {
|
||||
// According to Go grammar, certain type literals contain symbols that
|
||||
// do not strongly bind to the next lexicographical token (e.g., *T).
|
||||
switch t.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr:
|
||||
typeName = "(" + typeName + ")"
|
||||
}
|
||||
}
|
||||
return &textWrap{Prefix: typeName, Value: wrapParens(s)}
|
||||
}
|
||||
|
||||
// wrapParens wraps s with a set of parentheses, but avoids it if the
// wrapped node itself is already surrounded by a pair of parentheses or braces.
// It handles unwrapping one level of pointer-reference nodes.
|
||||
func wrapParens(s textNode) textNode {
|
||||
var refNode *textWrap
|
||||
if s2, ok := s.(*textWrap); ok {
|
||||
// Unwrap a single pointer reference node.
|
||||
switch s2.Metadata.(type) {
|
||||
case leafReference, trunkReference, trunkReferences:
|
||||
refNode = s2
|
||||
if s3, ok := refNode.Value.(*textWrap); ok {
|
||||
s2 = s3
|
||||
}
|
||||
}
|
||||
|
||||
// Already has delimiters that make parenthesis unnecessary.
|
||||
hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
|
||||
hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
|
||||
if hasParens || hasBraces {
|
||||
return s
|
||||
}
|
||||
}
|
||||
if refNode != nil {
|
||||
refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
|
||||
return s
|
||||
}
|
||||
return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
|
||||
}
|
||||
|
||||
// FormatValue prints the reflect.Value, taking extra care to avoid descending
|
||||
// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
|
||||
func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
// Check slice element for cycles.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRef, visited := ptrs.Push(v.Addr())
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, false)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
|
||||
}
|
||||
|
||||
// Check whether there is an Error or String method to call.
|
||||
if !opts.AvoidStringer && v.CanInterface() {
|
||||
// Avoid calling Error or String methods on nil receivers since many
|
||||
// implementations crash when doing so.
|
||||
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
|
||||
var prefix, strVal string
|
||||
func() {
|
||||
// Swallow and ignore any panics from String or Error.
|
||||
defer func() { recover() }()
|
||||
switch v := v.Interface().(type) {
|
||||
case error:
|
||||
strVal = v.Error()
|
||||
prefix = "e"
|
||||
case fmt.Stringer:
|
||||
strVal = v.String()
|
||||
prefix = "s"
|
||||
}
|
||||
}()
|
||||
if prefix != "" {
|
||||
return opts.formatString(prefix, strVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether to explicitly wrap the result with the type.
|
||||
var skipType bool
|
||||
defer func() {
|
||||
if !skipType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}()
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return textLine(fmt.Sprint(v.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return textLine(fmt.Sprint(v.Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uint8:
|
||||
if parentKind == reflect.Slice || parentKind == reflect.Array {
|
||||
return textLine(formatHex(v.Uint()))
|
||||
}
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uintptr:
|
||||
return textLine(formatHex(v.Uint()))
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return textLine(fmt.Sprint(v.Float()))
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return textLine(fmt.Sprint(v.Complex()))
|
||||
case reflect.String:
|
||||
return opts.formatString("", v.String())
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
return textLine(formatPointer(value.PointerOf(v), true))
|
||||
case reflect.Struct:
|
||||
var list textList
|
||||
v := makeAddressable(v) // needed for retrieveUnexportedField
|
||||
maxLen := v.NumField()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
vv := v.Field(i)
|
||||
if vv.IsZero() {
|
||||
continue // Elide fields with zero values
|
||||
}
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if supportExporters && !isExported(sf.Name) {
|
||||
vv = retrieveUnexportedField(v, sf, true)
|
||||
}
|
||||
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sf.Name, Value: s})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check whether this is a []byte of text data.
|
||||
if t.Elem() == byteType {
|
||||
b := v.Bytes()
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
||||
out = opts.formatString("", string(b))
|
||||
skipType = true
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
|
||||
fallthrough
|
||||
case reflect.Array:
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Value: s})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if t.Kind() == reflect.Slice && opts.PrintAddresses {
|
||||
header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
|
||||
out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
|
||||
}
|
||||
return out
|
||||
case reflect.Map:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for _, k := range value.SortKeys(v.MapKeys()) {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sk := formatMapKey(k, false, ptrs)
|
||||
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sk, Value: sv})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
return out
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
out = makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
return &textWrap{Prefix: "&", Value: out}
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
// Skip the name only if this is an unnamed pointer type.
|
||||
// Otherwise taking the address of a value does not reproduce
|
||||
// the named pointer type.
|
||||
if v.Type().Name() == "" {
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
}
|
||||
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
return out
|
||||
case reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
// Interfaces accept different concrete types,
|
||||
// so configure the underlying value to explicitly print the type.
|
||||
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatString(prefix, s string) textNode {
|
||||
maxLen := len(s)
|
||||
maxLines := strings.Count(s, "\n") + 1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
|
||||
maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
|
||||
// For multiline strings, use the triple-quote syntax,
|
||||
// but only use it when printing removed or inserted nodes since
|
||||
// we only want the extra verbosity for those cases.
|
||||
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
|
||||
isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
|
||||
for i := 0; i < len(lines) && isTripleQuoted; i++ {
|
||||
lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
line := lines[i]
|
||||
isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
|
||||
}
|
||||
if isTripleQuoted {
|
||||
var list textList
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
for i, line := range lines {
|
||||
if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
|
||||
comment := commentString(fmt.Sprintf("%d elided lines", numElided))
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
|
||||
break
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
|
||||
}
|
||||
|
||||
// Format the string as a single-line quoted string.
|
||||
if len(s) > maxLen+len(textEllipsis) {
|
||||
return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
|
||||
}
|
||||
return textLine(prefix + formatString(s))
|
||||
}
|
||||
|
||||
// formatMapKey formats v as if it were a map key.
|
||||
// The result is guaranteed to be a single line.
|
||||
func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
|
||||
var opts formatOptions
|
||||
opts.DiffMode = diffIdentical
|
||||
opts.TypeMode = elideType
|
||||
opts.PrintAddresses = disambiguate
|
||||
opts.AvoidStringer = disambiguate
|
||||
opts.QualifiedNames = disambiguate
|
||||
opts.VerbosityLevel = maxVerbosityPreset
|
||||
opts.LimitVerbosity = true
|
||||
s := opts.FormatValue(v, reflect.Map, ptrs).String()
|
||||
return strings.TrimSpace(s)
|
||||
}
|
||||
|
||||
// formatString prints s as a double-quoted or backtick-quoted string.
|
||||
func formatString(s string) string {
|
||||
// Use the quoted string if it is the same length as a raw string literal.
|
||||
// Otherwise, attempt to use the raw string form.
|
||||
qs := strconv.Quote(s)
|
||||
if len(qs) == 1+len(s)+1 {
|
||||
return qs
|
||||
}
|
||||
|
||||
// Disallow newlines to ensure output is a single line.
|
||||
// Only allow printable runes for readability purposes.
|
||||
rawInvalid := func(r rune) bool {
|
||||
return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
|
||||
}
|
||||
if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
|
||||
return "`" + s + "`"
|
||||
}
|
||||
return qs
|
||||
}
|
||||
|
||||
// formatHex prints u as a hexadecimal integer in Go notation.
|
||||
func formatHex(u uint64) string {
|
||||
var f string
|
||||
switch {
|
||||
case u <= 0xff:
|
||||
f = "0x%02x"
|
||||
case u <= 0xffff:
|
||||
f = "0x%04x"
|
||||
case u <= 0xffffff:
|
||||
f = "0x%06x"
|
||||
case u <= 0xffffffff:
|
||||
f = "0x%08x"
|
||||
case u <= 0xffffffffff:
|
||||
f = "0x%010x"
|
||||
case u <= 0xffffffffffff:
|
||||
f = "0x%012x"
|
||||
case u <= 0xffffffffffffff:
|
||||
f = "0x%014x"
|
||||
case u <= 0xffffffffffffffff:
|
||||
f = "0x%016x"
|
||||
}
|
||||
return fmt.Sprintf(f, u)
|
||||
}
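A hypothetical in-package test (not present in the vendored tree; it would need the testing and reflect imports) illustrates the two-hex-digit width steps:

func TestFormatHexWidths(t *testing.T) {
	// Each range adds two hex digits of zero padding.
	got := []string{formatHex(0x1), formatHex(0x1ff), formatHex(0x12345)}
	want := []string{"0x01", "0x01ff", "0x012345"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("formatHex widths = %v, want %v", got, want)
	}
}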
614 vendor/github.com/google/go-cmp/cmp/report_slices.go generated vendored Normal file
@@ -0,0 +1,614 @@
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
)
|
||||
|
||||
// CanFormatDiffSlice reports whether we support custom formatting for nodes
|
||||
// that are slices of primitive kinds or strings.
|
||||
func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
|
||||
switch {
|
||||
case opts.DiffMode != diffUnknown:
|
||||
return false // Must be formatting in diff mode
|
||||
case v.NumDiff == 0:
|
||||
return false // No differences detected
|
||||
case !v.ValueX.IsValid() || !v.ValueY.IsValid():
|
||||
return false // Both values must be valid
|
||||
case v.NumIgnored > 0:
|
||||
return false // Some ignore option was used
|
||||
case v.NumTransformed > 0:
|
||||
return false // Some transform option was used
|
||||
case v.NumCompared > 1:
|
||||
return false // More than one comparison was used
|
||||
case v.NumCompared == 1 && v.Type.Name() != "":
|
||||
// The need for cmp to check applicability of options on every element
|
||||
// in a slice is a significant performance detriment for large []byte.
|
||||
// The workaround is to specify Comparer(bytes.Equal),
|
||||
// which enables cmp to compare []byte more efficiently.
|
||||
// If they differ, we still want to provide batched diffing.
|
||||
// The logic disallows named types since they tend to have their own
|
||||
// String method, with nicer formatting than what this provides.
|
||||
return false
|
||||
}
|
||||
|
||||
// Check whether this is an interface with the same concrete types.
|
||||
t := v.Type
|
||||
vx, vy := v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
// Check whether we provide specialized diffing for this type.
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Only slices of primitive types have specialized handling.
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Both slice values have to be non-empty.
|
||||
if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
// If a sufficient number of elements already differ,
|
||||
// use specialized formatting even if length requirement is not met.
|
||||
if v.NumDiff > v.NumSame {
|
||||
return true
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Use specialized string diffing for longer slices or strings.
|
||||
const minLength = 32
|
||||
return vx.Len() >= minLength && vy.Len() >= minLength
|
||||
}
|
||||
|
||||
// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
|
||||
// This provides custom-tailored logic to make printing of differences in
|
||||
// textual strings and slices of primitive kinds more readable.
|
||||
func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
t, vx, vy := v.Type, v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
|
||||
// Auto-detect the type of the data.
|
||||
var sx, sy string
|
||||
var ssx, ssy []string
|
||||
var isString, isMostlyText, isPureLinedText, isBinary bool
|
||||
switch {
|
||||
case t.Kind() == reflect.String:
|
||||
sx, sy = vx.String(), vy.String()
|
||||
isString = true
|
||||
case t.Kind() == reflect.Slice && t.Elem() == byteType:
|
||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
||||
isString = true
|
||||
case t.Kind() == reflect.Array:
|
||||
// Arrays need to be addressable for slice operations to work.
|
||||
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
|
||||
vx2.Set(vx)
|
||||
vy2.Set(vy)
|
||||
vx, vy = vx2, vy2
|
||||
}
|
||||
if isString {
|
||||
var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
|
||||
for i, r := range sx + sy {
|
||||
numTotalRunes++
|
||||
if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
|
||||
numValidRunes++
|
||||
}
|
||||
if r == '\n' {
|
||||
if maxLineLen < i-lastLineIdx {
|
||||
maxLineLen = i - lastLineIdx
|
||||
}
|
||||
lastLineIdx = i + 1
|
||||
numLines++
|
||||
}
|
||||
}
|
||||
isPureText := numValidRunes == numTotalRunes
|
||||
isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
|
||||
isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
|
||||
isBinary = !isMostlyText
|
||||
|
||||
// Avoid diffing by lines if it produces a significantly more complex
|
||||
// edit script than diffing by bytes.
|
||||
if isPureLinedText {
|
||||
ssx = strings.Split(sx, "\n")
|
||||
ssy = strings.Split(sy, "\n")
|
||||
esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(ssx[ix] == ssy[iy])
|
||||
})
|
||||
esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(sx[ix] == sy[iy])
|
||||
})
|
||||
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
|
||||
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
|
||||
quotedLength := len(strconv.Quote(sx + sy))
|
||||
unquotedLength := len(sx) + len(sy)
|
||||
escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
|
||||
isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
|
||||
}
|
||||
}
|
||||
|
||||
// Format the string into printable records.
|
||||
var list textList
|
||||
var delim string
|
||||
switch {
|
||||
// If the text appears to be multi-lined text,
|
||||
// then perform differencing across individual lines.
|
||||
case isPureLinedText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.Index(0).String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
delim = "\n"
|
||||
|
||||
// If possible, use a custom triple-quote (""") syntax for printing
|
||||
// differences in a string literal. This format is more readable,
|
||||
// but has edge-cases where differences are visually indistinguishable.
|
||||
// This format is avoided under the following conditions:
|
||||
// - A line starts with `"""`
|
||||
// - A line starts with "..."
|
||||
// - A line contains non-printable characters
|
||||
// - Adjacent different lines differ only by whitespace
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// """
|
||||
// ... // 3 identical lines
|
||||
// foo
|
||||
// bar
|
||||
// - baz
|
||||
// + BAZ
|
||||
// """
|
||||
isTripleQuoted := true
|
||||
prevRemoveLines := map[string]bool{}
|
||||
prevInsertLines := map[string]bool{}
|
||||
var list2 textList
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
for _, r := range list {
|
||||
if !r.Value.Equal(textEllipsis) {
|
||||
line, _ := strconv.Unquote(string(r.Value.(textLine)))
|
||||
line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
normLine := strings.Map(func(r rune) rune {
|
||||
if unicode.IsSpace(r) {
|
||||
return -1 // drop whitespace to avoid visually indistinguishable output
|
||||
}
|
||||
return r
|
||||
}, line)
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
|
||||
switch r.Diff {
|
||||
case diffRemoved:
|
||||
isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
|
||||
prevRemoveLines[normLine] = true
|
||||
case diffInserted:
|
||||
isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
|
||||
prevInsertLines[normLine] = true
|
||||
}
|
||||
if !isTripleQuoted {
|
||||
break
|
||||
}
|
||||
r.Value = textLine(line)
|
||||
r.ElideComma = true
|
||||
}
|
||||
if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
|
||||
prevRemoveLines = map[string]bool{}
|
||||
prevInsertLines = map[string]bool{}
|
||||
}
|
||||
list2 = append(list2, r)
|
||||
}
|
||||
if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
|
||||
list2 = list2[:len(list2)-1] // elide single empty line at the end
|
||||
}
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
if isTripleQuoted {
|
||||
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if t != stringType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Always emit type for slices since the triple-quote syntax
|
||||
// looks like a string (not a slice).
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// If the text appears to be single-lined text,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is printed as quoted strings.
|
||||
case isMostlyText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
|
||||
// If the text appears to be binary data,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is inspired by hexdump.
|
||||
case isBinary:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
|
||||
return textRecord{Diff: d, Value: textLine(s), Comment: comment}
|
||||
},
|
||||
)
|
||||
|
||||
// For all other slices of primitive types,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The size of each chunk depends on the width of the element kind.
|
||||
default:
|
||||
var chunkSize int
|
||||
if t.Elem().Kind() == reflect.Bool {
|
||||
chunkSize = 16
|
||||
} else {
|
||||
switch t.Elem().Bits() {
|
||||
case 8:
|
||||
chunkSize = 16
|
||||
case 16:
|
||||
chunkSize = 12
|
||||
case 32:
|
||||
chunkSize = 8
|
||||
default:
|
||||
chunkSize = 8
|
||||
}
|
||||
}
|
||||
list = opts.formatDiffSlice(
|
||||
vx, vy, chunkSize, t.Elem().Kind().String(),
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
|
||||
case reflect.Uint8, reflect.Uintptr:
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
|
||||
}
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Wrap the output with appropriate type information.
|
||||
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if !isMostlyText {
|
||||
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
|
||||
// Emit the type for extra clarity (e.g. "string{...}").
|
||||
if t.Kind() == reflect.String {
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != stringType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != bytesType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
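For context, a hypothetical standalone usage sketch (assumed inputs, not from this repository) of the kind of multi-line string comparison that can exercise this formatting path; go-cmp documents its report text as unstable, so the output is only printed, never asserted:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Multi-line strings that differ in a single line tend to be rendered
	// with the line-based (triple-quote) formatting described above.
	x := "alpha\nbravo\ncharlie\ndelta\nfoxtrot\n"
	y := "alpha\nbravo\ncharlie\nDELTA\nfoxtrot\n"
	fmt.Print(cmp.Diff(x, y))
}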
|
||||
|
||||
// formatASCII formats s as an ASCII string.
|
||||
// This is useful for printing binary strings in a semi-legible way.
|
||||
func formatASCII(s string) string {
|
||||
b := bytes.Repeat([]byte{'.'}, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
if ' ' <= s[i] && s[i] <= '~' {
|
||||
b[i] = s[i]
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
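A standalone sketch (assumed sample input) of the substitution formatASCII performs: bytes outside the printable ASCII range become '.', as in hexdump's right-hand column:

package main

import "fmt"

func main() {
	// Replace bytes outside the printable ASCII range with '.'.
	b := []byte("ab\x00\xffcd\n")
	for i := range b {
		if b[i] < ' ' || b[i] > '~' {
			b[i] = '.'
		}
	}
	fmt.Println(string(b)) // "ab..cd."
}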
|
||||
|
||||
func (opts formatOptions) formatDiffSlice(
|
||||
vx, vy reflect.Value, chunkSize int, name string,
|
||||
makeRec func(reflect.Value, diffMode) textRecord,
|
||||
) (list textList) {
|
||||
eq := func(ix, iy int) bool {
|
||||
return vx.Index(ix).Interface() == vy.Index(iy).Interface()
|
||||
}
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(eq(ix, iy))
|
||||
})
|
||||
|
||||
appendChunks := func(v reflect.Value, d diffMode) int {
|
||||
n0 := v.Len()
|
||||
for v.Len() > 0 {
|
||||
n := chunkSize
|
||||
if n > v.Len() {
|
||||
n = v.Len()
|
||||
}
|
||||
list = append(list, makeRec(v.Slice(0, n), d))
|
||||
v = v.Slice(n, v.Len())
|
||||
}
|
||||
return n0 - v.Len()
|
||||
}
|
||||
|
||||
var numDiffs int
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
groups := coalesceAdjacentEdits(name, es)
|
||||
groups = coalesceInterveningIdentical(groups, chunkSize/4)
|
||||
groups = cleanupSurroundingIdentical(groups, eq)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Print equal.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing equal bytes to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
numLo++
|
||||
}
|
||||
for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
|
||||
numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
|
||||
}
|
||||
|
||||
// Print the equal bytes.
|
||||
appendChunks(vx.Slice(0, numLo), diffIdentical)
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
}
|
||||
appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
|
||||
vx = vx.Slice(numEqual, vx.Len())
|
||||
vy = vy.Slice(numEqual, vy.Len())
|
||||
continue
|
||||
}
|
||||
|
||||
// Print unequal.
|
||||
len0 := len(list)
|
||||
nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
|
||||
vx = vx.Slice(nx, vx.Len())
|
||||
ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
|
||||
vy = vy.Slice(ny, vy.Len())
|
||||
numDiffs += len(list) - len0
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(vx.Len() == 0 && vy.Len() == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
}
|
||||
return list
|
||||
}
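A standalone sketch (assumed sample data) of the fixed-size chunking walk that appendChunks performs over a reflect.Value, shown here on a plain byte slice:

package main

import "fmt"

// chunks splits s into pieces of at most size elements,
// the same walk appendChunks performs when emitting records.
func chunks(s []byte, size int) [][]byte {
	var out [][]byte
	for len(s) > 0 {
		n := size
		if n > len(s) {
			n = len(s)
		}
		out = append(out, s[:n])
		s = s[n:]
	}
	return out
}

func main() {
	fmt.Println(chunks([]byte("abcdefgh"), 3)) // [[97 98 99] [100 101 102] [103 104]]
}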
|
||||
|
||||
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
|
||||
// equal or unequal counts.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: "..XXY...Y"
|
||||
// Output: [
|
||||
// {NumIdentical: 2},
|
||||
// {NumRemoved: 2, NumInserted 1},
|
||||
// {NumIdentical: 3},
|
||||
// {NumInserted: 1},
|
||||
// ]
|
||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
||||
var prevMode byte
|
||||
lastStats := func(mode byte) *diffStats {
|
||||
if prevMode != mode {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevMode = mode
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case diff.Identity:
|
||||
lastStats('=').NumIdentical++
|
||||
case diff.UniqueX:
|
||||
lastStats('!').NumRemoved++
|
||||
case diff.UniqueY:
|
||||
lastStats('!').NumInserted++
|
||||
case diff.Modified:
|
||||
lastStats('!').NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
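As an illustration of the run-length coalescing above, a standalone sketch that encodes an edit script as a plain string of '.', 'X', and 'Y' marks (an assumption for the example; the real code consumes diff.EditScript):

package main

import "fmt"

type stats struct{ identical, removed, inserted int }

// coalesce groups adjacent edits into equal/unequal runs, mirroring
// coalesceAdjacentEdits: '.' = identity, 'X' = removed, 'Y' = inserted.
func coalesce(es string) []stats {
	var groups []stats
	var prevEqual, started bool
	for _, e := range es {
		equal := e == '.'
		if !started || equal != prevEqual {
			groups = append(groups, stats{})
			prevEqual, started = equal, true
		}
		g := &groups[len(groups)-1]
		switch e {
		case '.':
			g.identical++
		case 'X':
			g.removed++
		case 'Y':
			g.inserted++
		}
	}
	return groups
}

func main() {
	fmt.Println(coalesce("..XXY...Y"))
	// [{2 0 0} {0 2 1} {3 0 0} {0 0 1}]
}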
|
||||
|
||||
// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
|
||||
// equal groups into adjacent unequal groups that currently result in a
|
||||
// dual inserted/removed printout. This acts as a high-pass filter to smooth
|
||||
// out high-frequency changes within the windowSize.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// WindowSize: 16,
|
||||
// Input: [
|
||||
// {NumIdentical: 61}, // group 0
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 1
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 9}, // └── coalesce
|
||||
// {NumIdentical: 64}, // group 2
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 3
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 7}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 2}, // └── coalesce
|
||||
// {NumIdentical: 63}, // group 4
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 64},
|
||||
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 63},
|
||||
// ]
|
||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
||||
groups, groupsOrig := groups[:0], groups
|
||||
for i, ds := range groupsOrig {
|
||||
if len(groups) >= 2 && ds.NumDiff() > 0 {
|
||||
prev := &groups[len(groups)-2] // Unequal group
|
||||
curr := &groups[len(groups)-1] // Equal group
|
||||
next := &groupsOrig[i] // Unequal group
|
||||
hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
|
||||
hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
|
||||
if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
|
||||
*prev = prev.Append(*curr).Append(*next)
|
||||
groups = groups[:len(groups)-1] // Truncate off equal group
|
||||
continue
|
||||
}
|
||||
}
|
||||
groups = append(groups, ds)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// cleanupSurroundingIdentical scans through all unequal groups, and
|
||||
// moves any leading sequence of equal elements to the preceding equal group and
|
||||
// moves any trailing sequence of equal elements to the succeeding equal group.
|
||||
//
|
||||
// This is necessary since coalesceInterveningIdentical may coalesce edit groups
|
||||
// together such that leading/trailing spans of equal elements become possible.
|
||||
// Note that this can occur even with an optimal diffing algorithm.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
|
||||
// {NumIdentical: 67},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
|
||||
// {NumIdentical: 54},
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 64}, // incremented by 3
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 67},
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 64}, // incremented by 10
|
||||
// ]
|
||||
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
|
||||
var ix, iy int // indexes into sequence x and y
|
||||
for i, ds := range groups {
|
||||
// Handle equal group.
|
||||
if ds.NumDiff() == 0 {
|
||||
ix += ds.NumIdentical
|
||||
iy += ds.NumIdentical
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal group.
|
||||
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
|
||||
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
|
||||
var numLeadingIdentical, numTrailingIdentical int
|
||||
for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
|
||||
numLeadingIdentical++
|
||||
}
|
||||
for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
|
||||
numTrailingIdentical++
|
||||
}
|
||||
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
|
||||
if numLeadingIdentical > 0 {
|
||||
// Remove leading identical span from this group and
|
||||
// insert it into the preceding group.
|
||||
if i-1 >= 0 {
|
||||
groups[i-1].NumIdentical += numLeadingIdentical
|
||||
} else {
|
||||
// No preceding group exists, so prepend a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
|
||||
}()
|
||||
}
|
||||
// Increment indexes since the preceding group would have handled this.
|
||||
ix += numLeadingIdentical
|
||||
iy += numLeadingIdentical
|
||||
}
|
||||
if numTrailingIdentical > 0 {
|
||||
// Remove trailing identical span from this group and
|
||||
// insert it into the succeeding group.
|
||||
if i+1 < len(groups) {
|
||||
groups[i+1].NumIdentical += numTrailingIdentical
|
||||
} else {
|
||||
// No succeeding group exists, so append a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
|
||||
}()
|
||||
}
|
||||
// Do not increment indexes since the succeeding group will handle this.
|
||||
}
|
||||
|
||||
// Update this group since some identical elements were removed.
|
||||
nx -= numIdentical
|
||||
ny -= numIdentical
|
||||
groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
|
||||
}
|
||||
ix += nx
|
||||
iy += ny
|
||||
}
|
||||
return groups
|
||||
}
|
432
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
Normal file
|
@@ -0,0 +1,432 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
const maxColumnLength = 80
|
||||
|
||||
type indentMode int
|
||||
|
||||
func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
|
||||
// The output of Diff is documented as being unstable to provide future
|
||||
// flexibility in changing the output for more humanly readable reports.
|
||||
// This logic intentionally introduces instability to the exact output
|
||||
// so that users can detect accidental reliance on stability early on,
|
||||
// rather than much later when an actual change to the format occurs.
|
||||
if flags.Deterministic || randBool {
|
||||
// Use regular spaces (U+0020).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
} else {
|
||||
// Use non-breaking spaces (U+00a0).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
}
|
||||
return repeatCount(n).appendChar(b, '\t')
|
||||
}
|
||||
|
||||
type repeatCount int
|
||||
|
||||
func (n repeatCount) appendChar(b []byte, c byte) []byte {
|
||||
for ; n > 0; n-- {
|
||||
b = append(b, c)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// textNode is a simplified tree-based representation of structured text.
|
||||
// Possible node types are textWrap, textList, or textLine.
|
||||
type textNode interface {
|
||||
// Len reports the length in bytes of a single-line version of the tree.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are ignored.
|
||||
Len() int
|
||||
// Equal reports whether the two trees are structurally identical.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are compared.
|
||||
Equal(textNode) bool
|
||||
// String returns the string representation of the text tree.
|
||||
// It is not guaranteed that len(x.String()) == x.Len(),
|
||||
// nor that x.String() == y.String() implies that x.Equal(y).
|
||||
String() string
|
||||
|
||||
// formatCompactTo formats the contents of the tree as a single-line string
|
||||
// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
|
||||
// fields are ignored.
|
||||
//
|
||||
// However, not all nodes in the tree should be collapsed as a single-line.
|
||||
// If a node can be collapsed as a single-line, it is replaced by a textLine
|
||||
// node. Since the top-level node cannot replace itself, this also returns
|
||||
// the current node itself.
|
||||
//
|
||||
// This does not mutate the receiver.
|
||||
formatCompactTo([]byte, diffMode) ([]byte, textNode)
|
||||
// formatExpandedTo formats the contents of the tree as a multi-line string
|
||||
// to the provided buffer. In order for column alignment to operate well,
|
||||
// formatCompactTo must be called before calling formatExpandedTo.
|
||||
formatExpandedTo([]byte, diffMode, indentMode) []byte
|
||||
}
|
||||
|
||||
// textWrap is a wrapper that concatenates a prefix and/or a suffix
|
||||
// to the underlying node.
|
||||
type textWrap struct {
|
||||
Prefix string // e.g., "bytes.Buffer{"
|
||||
Value textNode // textWrap | textList | textLine
|
||||
Suffix string // e.g., "}"
|
||||
Metadata interface{} // arbitrary metadata; has no effect on formatting
|
||||
}
|
||||
|
||||
func (s *textWrap) Len() int {
|
||||
return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
|
||||
}
|
||||
func (s1 *textWrap) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(*textWrap); ok {
|
||||
return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s *textWrap) String() string {
|
||||
var d diffMode
|
||||
var n indentMode
|
||||
_, s2 := s.formatCompactTo(nil, d)
|
||||
b := n.appendIndent(nil, d) // Leading indent
|
||||
b = s2.formatExpandedTo(b, d, n) // Main body
|
||||
b = append(b, '\n') // Trailing newline
|
||||
return string(b)
|
||||
}
|
||||
func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
n0 := len(b) // Original buffer length
|
||||
b = append(b, s.Prefix...)
|
||||
b, s.Value = s.Value.formatCompactTo(b, d)
|
||||
b = append(b, s.Suffix...)
|
||||
if _, ok := s.Value.(textLine); ok {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
b = append(b, s.Prefix...)
|
||||
b = s.Value.formatExpandedTo(b, d, n)
|
||||
b = append(b, s.Suffix...)
|
||||
return b
|
||||
}
|
||||
|
||||
// textList is a comma-separated list of textWrap or textLine nodes.
|
||||
// The list may be formatted as multi-lines or single-line at the discretion
|
||||
// of the textList.formatCompactTo method.
|
||||
type textList []textRecord
|
||||
type textRecord struct {
|
||||
Diff diffMode // e.g., 0 or '-' or '+'
|
||||
Key string // e.g., "MyField"
|
||||
Value textNode // textWrap | textLine
|
||||
ElideComma bool // avoid trailing comma
|
||||
Comment fmt.Stringer // e.g., "6 identical fields"
|
||||
}
|
||||
|
||||
// AppendEllipsis appends a new ellipsis node to the list if none already
|
||||
// exists at the end. If ds is non-zero, it coalesces the statistics with the
|
||||
// previous diffStats.
|
||||
func (s *textList) AppendEllipsis(ds diffStats) {
|
||||
hasStats := !ds.IsZero()
|
||||
if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
|
||||
if hasStats {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
|
||||
} else {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
|
||||
}
|
||||
return
|
||||
}
|
||||
if hasStats {
|
||||
(*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
|
||||
}
|
||||
}
|
||||
|
||||
func (s textList) Len() (n int) {
|
||||
for i, r := range s {
|
||||
n += len(r.Key)
|
||||
if r.Key != "" {
|
||||
n += len(": ")
|
||||
}
|
||||
n += r.Value.Len()
|
||||
if i < len(s)-1 {
|
||||
n += len(", ")
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (s1 textList) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textList); ok {
|
||||
if len(s1) != len(s2) {
|
||||
return false
|
||||
}
|
||||
for i := range s1 {
|
||||
r1, r2 := s1[i], s2[i]
|
||||
if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s textList) String() string {
|
||||
return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
|
||||
}
|
||||
|
||||
func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
s = append(textList(nil), s...) // Avoid mutating original
|
||||
|
||||
// Determine whether we can collapse this list as a single line.
|
||||
n0 := len(b) // Original buffer length
|
||||
var multiLine bool
|
||||
for i, r := range s {
|
||||
if r.Diff == diffInserted || r.Diff == diffRemoved {
|
||||
multiLine = true
|
||||
}
|
||||
b = append(b, r.Key...)
|
||||
if r.Key != "" {
|
||||
b = append(b, ": "...)
|
||||
}
|
||||
b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
|
||||
if _, ok := s[i].Value.(textLine); !ok {
|
||||
multiLine = true
|
||||
}
|
||||
if r.Comment != nil {
|
||||
multiLine = true
|
||||
}
|
||||
if i < len(s)-1 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
}
|
||||
// Force multi-lined output when printing a removed/inserted node that
|
||||
// is sufficiently long.
|
||||
if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
|
||||
multiLine = true
|
||||
}
|
||||
if !multiLine {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
|
||||
func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
alignKeyLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return r.Key == "" || !isLine
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
|
||||
)
|
||||
alignValueLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
|
||||
)
|
||||
|
||||
// Format lists of simple lists in a batched form.
|
||||
// If the list is a sequence of only textLine values,
|
||||
// then batch multiple values on a single line.
|
||||
var isSimple bool
|
||||
for _, r := range s {
|
||||
_, isLine := r.Value.(textLine)
|
||||
isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
|
||||
if !isSimple {
|
||||
break
|
||||
}
|
||||
}
|
||||
if isSimple {
|
||||
n++
|
||||
var batch []byte
|
||||
emitBatch := func() {
|
||||
if len(batch) > 0 {
|
||||
b = n.appendIndent(append(b, '\n'), d)
|
||||
b = append(b, bytes.TrimRight(batch, " ")...)
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
for _, r := range s {
|
||||
line := r.Value.(textLine)
|
||||
if len(batch)+len(line)+len(", ") > maxColumnLength {
|
||||
emitBatch()
|
||||
}
|
||||
batch = append(batch, line...)
|
||||
batch = append(batch, ", "...)
|
||||
}
|
||||
emitBatch()
|
||||
n--
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
// Format the list as a multi-lined output.
|
||||
n++
|
||||
for i, r := range s {
|
||||
b = n.appendIndent(append(b, '\n'), d|r.Diff)
|
||||
if r.Key != "" {
|
||||
b = append(b, r.Key+": "...)
|
||||
}
|
||||
b = alignKeyLens[i].appendChar(b, ' ')
|
||||
|
||||
b = r.Value.formatExpandedTo(b, d|r.Diff, n)
|
||||
if !r.ElideComma {
|
||||
b = append(b, ',')
|
||||
}
|
||||
b = alignValueLens[i].appendChar(b, ' ')
|
||||
|
||||
if r.Comment != nil {
|
||||
b = append(b, " // "+r.Comment.String()...)
|
||||
}
|
||||
}
|
||||
n--
|
||||
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
func (s textList) alignLens(
|
||||
skipFunc func(textRecord) bool,
|
||||
lenFunc func(textRecord) int,
|
||||
) []repeatCount {
|
||||
var startIdx, endIdx, maxLen int
|
||||
lens := make([]repeatCount, len(s))
|
||||
for i, r := range s {
|
||||
if skipFunc(r) {
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
startIdx, endIdx, maxLen = i+1, i+1, 0
|
||||
} else {
|
||||
if maxLen < lenFunc(r) {
|
||||
maxLen = lenFunc(r)
|
||||
}
|
||||
endIdx = i + 1
|
||||
}
|
||||
}
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
return lens
|
||||
}
|
||||
|
||||
// textLine is a single-line segment of text and is always a leaf node
|
||||
// in the textNode tree.
|
||||
type textLine []byte
|
||||
|
||||
var (
|
||||
textNil = textLine("nil")
|
||||
textEllipsis = textLine("...")
|
||||
)
|
||||
|
||||
func (s textLine) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s1 textLine) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textLine); ok {
|
||||
return bytes.Equal([]byte(s1), []byte(s2))
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s textLine) String() string {
|
||||
return string(s)
|
||||
}
|
||||
func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
return append(b, s...), s
|
||||
}
|
||||
func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
|
||||
return append(b, s...)
|
||||
}
|
||||
|
||||
type diffStats struct {
|
||||
Name string
|
||||
NumIgnored int
|
||||
NumIdentical int
|
||||
NumRemoved int
|
||||
NumInserted int
|
||||
NumModified int
|
||||
}
|
||||
|
||||
func (s diffStats) IsZero() bool {
|
||||
s.Name = ""
|
||||
return s == diffStats{}
|
||||
}
|
||||
|
||||
func (s diffStats) NumDiff() int {
|
||||
return s.NumRemoved + s.NumInserted + s.NumModified
|
||||
}
|
||||
|
||||
func (s diffStats) Append(ds diffStats) diffStats {
|
||||
assert(s.Name == ds.Name)
|
||||
s.NumIgnored += ds.NumIgnored
|
||||
s.NumIdentical += ds.NumIdentical
|
||||
s.NumRemoved += ds.NumRemoved
|
||||
s.NumInserted += ds.NumInserted
|
||||
s.NumModified += ds.NumModified
|
||||
return s
|
||||
}
|
||||
|
||||
// String prints a humanly-readable summary of coalesced records.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
|
||||
func (s diffStats) String() string {
|
||||
var ss []string
|
||||
var sum int
|
||||
labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
|
||||
counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
|
||||
for i, n := range counts {
|
||||
if n > 0 {
|
||||
ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
|
||||
}
|
||||
sum += n
|
||||
}
|
||||
|
||||
// Pluralize the name (adjusting for some obscure English grammar rules).
|
||||
name := s.Name
|
||||
if sum > 1 {
|
||||
name += "s"
|
||||
if strings.HasSuffix(name, "ys") {
|
||||
name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
|
||||
}
|
||||
}
|
||||
|
||||
// Format the list according to English grammar (with Oxford comma).
|
||||
switch n := len(ss); n {
|
||||
case 0:
|
||||
return ""
|
||||
case 1, 2:
|
||||
return strings.Join(ss, " and ") + " " + name
|
||||
default:
|
||||
return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
|
||||
}
|
||||
}
|
||||
|
||||
type commentString string
|
||||
|
||||
func (s commentString) String() string { return string(s) }
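For illustration, a standalone sketch (assumed sample labels) of the Oxford-comma list joining used by diffStats.String:

package main

import (
	"fmt"
	"strings"
)

// joinEnglish joins items with commas and "and", using an Oxford comma
// for three or more items, as diffStats.String does.
func joinEnglish(ss []string) string {
	switch n := len(ss); n {
	case 0:
		return ""
	case 1, 2:
		return strings.Join(ss, " and ")
	default:
		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1]
	}
}

func main() {
	fmt.Println(joinEnglish([]string{"5 ignored"}))                            // 5 ignored
	fmt.Println(joinEnglish([]string{"5 ignored", "2 removed"}))               // 5 ignored and 2 removed
	fmt.Println(joinEnglish([]string{"5 ignored", "2 removed", "1 inserted"})) // 5 ignored, 2 removed, and 1 inserted
}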
|
121
vendor/github.com/google/go-cmp/cmp/report_value.go
generated
vendored
Normal file
|
@@ -0,0 +1,121 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import "reflect"
|
||||
|
||||
// valueNode represents a single node within a report, which is a
|
||||
// structured representation of the value tree, containing information
|
||||
// regarding which nodes are equal or not.
|
||||
type valueNode struct {
|
||||
parent *valueNode
|
||||
|
||||
Type reflect.Type
|
||||
ValueX reflect.Value
|
||||
ValueY reflect.Value
|
||||
|
||||
// NumSame is the number of leaf nodes that are equal.
|
||||
// All descendants are equal only if NumDiff is 0.
|
||||
NumSame int
|
||||
// NumDiff is the number of leaf nodes that are not equal.
|
||||
NumDiff int
|
||||
// NumIgnored is the number of leaf nodes that are ignored.
|
||||
NumIgnored int
|
||||
// NumCompared is the number of leaf nodes that were compared
|
||||
// using an Equal method or Comparer function.
|
||||
NumCompared int
|
||||
// NumTransformed is the number of non-leaf nodes that were transformed.
|
||||
NumTransformed int
|
||||
// NumChildren is the number of transitive descendants of this node.
|
||||
// This counts from zero; thus, leaf nodes have no descendants.
|
||||
NumChildren int
|
||||
// MaxDepth is the maximum depth of the tree. This counts from zero;
|
||||
// thus, leaf nodes have a depth of zero.
|
||||
MaxDepth int
|
||||
|
||||
// Records is a list of struct fields, slice elements, or map entries.
|
||||
Records []reportRecord // If populated, implies Value is not populated
|
||||
|
||||
// Value is the result of a transformation, pointer indirection, or
|
||||
// type assertion.
|
||||
Value *valueNode // If populated, implies Records is not populated
|
||||
|
||||
// TransformerName is the name of the transformer.
|
||||
TransformerName string // If non-empty, implies Value is populated
|
||||
}
|
||||
type reportRecord struct {
|
||||
Key reflect.Value // Invalid for slice element
|
||||
Value *valueNode
|
||||
}
|
||||
|
||||
func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
|
||||
vx, vy := ps.Values()
|
||||
child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
|
||||
switch s := ps.(type) {
|
||||
case StructField:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
|
||||
case SliceIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Value: child})
|
||||
case MapIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
|
||||
case Indirect:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case TypeAssertion:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case Transform:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
parent.TransformerName = s.Name()
|
||||
parent.NumTransformed++
|
||||
default:
|
||||
assert(parent == nil) // Must be the root step
|
||||
}
|
||||
return child
|
||||
}
|
||||
|
||||
func (r *valueNode) Report(rs Result) {
|
||||
assert(r.MaxDepth == 0) // May only be called on leaf nodes
|
||||
|
||||
if rs.ByIgnore() {
|
||||
r.NumIgnored++
|
||||
} else {
|
||||
if rs.Equal() {
|
||||
r.NumSame++
|
||||
} else {
|
||||
r.NumDiff++
|
||||
}
|
||||
}
|
||||
assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
|
||||
|
||||
if rs.ByMethod() {
|
||||
r.NumCompared++
|
||||
}
|
||||
if rs.ByFunc() {
|
||||
r.NumCompared++
|
||||
}
|
||||
assert(r.NumCompared <= 1)
|
||||
}
|
||||
|
||||
func (child *valueNode) PopStep() (parent *valueNode) {
|
||||
if child.parent == nil {
|
||||
return nil
|
||||
}
|
||||
parent = child.parent
|
||||
parent.NumSame += child.NumSame
|
||||
parent.NumDiff += child.NumDiff
|
||||
parent.NumIgnored += child.NumIgnored
|
||||
parent.NumCompared += child.NumCompared
|
||||
parent.NumTransformed += child.NumTransformed
|
||||
parent.NumChildren += child.NumChildren + 1
|
||||
if parent.MaxDepth < child.MaxDepth+1 {
|
||||
parent.MaxDepth = child.MaxDepth + 1
|
||||
}
|
||||
return parent
|
||||
}
|
202
vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
generated
vendored
Normal file
|
@@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
185
vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
generated
vendored
Normal file
|
@@ -0,0 +1,185 @@
|
|||
// Copyright 2022 Google LLC.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package client is a cross-platform client for the signer binary (a.k.a. "EnterpriseCertSigner").
|
||||
//
|
||||
// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use.
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/rpc"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/googleapis/enterprise-certificate-proxy/client/util"
|
||||
)
|
||||
|
||||
const signAPI = "EnterpriseCertSigner.Sign"
|
||||
const certificateChainAPI = "EnterpriseCertSigner.CertificateChain"
|
||||
const publicKeyAPI = "EnterpriseCertSigner.Public"
|
||||
|
||||
// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser.
|
||||
type Connection struct {
|
||||
io.ReadCloser
|
||||
io.WriteCloser
|
||||
}
|
||||
|
||||
// Close closes c's underlying ReadCloser and WriteCloser.
|
||||
func (c *Connection) Close() error {
|
||||
rerr := c.ReadCloser.Close()
|
||||
werr := c.WriteCloser.Close()
|
||||
if rerr != nil {
|
||||
return rerr
|
||||
}
|
||||
return werr
|
||||
}
|
||||
|
||||
func init() {
|
||||
gob.Register(crypto.SHA256)
|
||||
gob.Register(&rsa.PSSOptions{})
|
||||
}
|
||||
|
||||
// SignArgs contains arguments to a crypto Signer.Sign method.
|
||||
type SignArgs struct {
|
||||
Digest []byte // The content to sign.
|
||||
Opts crypto.SignerOpts // Options for signing, such as Hash identifier.
|
||||
}
|
||||
|
||||
// Key implements credential.Credential by holding the executed signer subprocess.
|
||||
type Key struct {
|
||||
cmd *exec.Cmd // Pointer to the signer subprocess.
|
||||
client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess.
|
||||
publicKey crypto.PublicKey // Public key of loaded certificate.
|
||||
chain [][]byte // Certificate chain of loaded certificate.
|
||||
}
|
||||
|
||||
// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key.
|
||||
func (k *Key) CertificateChain() [][]byte {
|
||||
return k.chain
|
||||
}
|
||||
|
||||
// Close closes the RPC connection and kills the signer subprocess.
|
||||
// Call this to free up resources when the Key object is no longer needed.
|
||||
func (k *Key) Close() error {
|
||||
if err := k.cmd.Process.Kill(); err != nil {
|
||||
return fmt.Errorf("failed to kill signer process: %w", err)
|
||||
}
|
||||
// Wait for cmd to exit and release resources. Since the process is forcefully killed, this
|
||||
// will return a non-nil error (varies by OS), which we will ignore.
|
||||
_ = k.cmd.Wait()
|
||||
// The Pipes connecting the RPC client should have been closed when the signer subprocess was killed.
|
||||
// Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault.
|
||||
if err := k.client.Close(); err.Error() != "close |0: file already closed" {
|
||||
return fmt.Errorf("failed to close RPC connection: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Public returns the public key for this Key.
|
||||
func (k *Key) Public() crypto.PublicKey {
|
||||
return k.publicKey
|
||||
}
|
||||
|
||||
// Sign signs a message digest, using the specified signer options.
|
||||
func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) {
|
||||
if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() {
|
||||
return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size())
|
||||
}
|
||||
err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed)
|
||||
return
|
||||
}
|
||||
|
||||
// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable,
|
||||
// possibly due to missing config or missing binary path.
|
||||
var ErrCredUnavailable = errors.New("Cred is unavailable")
|
||||
|
||||
// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate
|
||||
// related operations, including signing messages with the private key.
|
||||
//
|
||||
// The signer binary path is read from the specified configFilePath, if provided.
|
||||
// Otherwise, use the default config file path.
|
||||
//
|
||||
// The config file also specifies which certificate the signer should use.
|
||||
func Cred(configFilePath string) (*Key, error) {
|
||||
if configFilePath == "" {
|
||||
configFilePath = util.GetDefaultConfigFilePath()
|
||||
}
|
||||
enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath)
|
||||
if err != nil {
|
||||
if errors.Is(err, util.ErrConfigUnavailable) {
|
||||
return nil, ErrCredUnavailable
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
k := &Key{
|
||||
cmd: exec.Command(enterpriseCertSignerPath, configFilePath),
|
||||
}
|
||||
|
||||
// Redirect errors from subprocess to parent process.
|
||||
k.cmd.Stderr = os.Stderr
|
||||
|
||||
// RPC client will communicate with subprocess over stdin/stdout.
|
||||
kin, err := k.cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kout, err := k.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.client = rpc.NewClient(&Connection{kout, kin})
|
||||
|
||||
if err := k.cmd.Start(); err != nil {
|
||||
return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err)
|
||||
}
|
||||
|
||||
if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err)
|
||||
}
|
||||
|
||||
var publicKeyBytes []byte
|
||||
if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve public key: %w", err)
|
||||
}
|
||||
|
||||
publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse public key: %w", err)
|
||||
}
|
||||
|
||||
var ok bool
|
||||
k.publicKey, ok = publicKey.(crypto.PublicKey)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid public key type: %T", publicKey)
|
||||
}
|
||||
|
||||
switch pub := k.publicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
if pub.Size() < 256 {
|
||||
return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8)
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported public key type: %v", pub)
|
||||
}
|
||||
|
||||
return k, nil
|
||||
}
|
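Taken together, Cred and Key give a crypto.Signer whose private-key operations stay inside the signer subprocess. A minimal sketch of how a caller might plug such a Key into an mTLS HTTP client follows; the import path matches this vendored package, while the config file path and the client wiring are illustrative assumptions, not part of the vendored code.

```go
// Sketch only: wiring a client.Key into an mTLS HTTP client.
// The config file path below is a hypothetical placeholder.
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

func main() {
	key, err := client.Cred("/path/to/certificate_config.json")
	if err != nil {
		log.Fatalf("loading ECP credential: %v", err)
	}
	defer key.Close()

	// Key satisfies crypto.Signer (Public + Sign), so it can act as the
	// private key of a tls.Certificate while signing happens out of process.
	cert := tls.Certificate{
		Certificate: key.CertificateChain(),
		PrivateKey:  key,
	}
	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		},
	}
	_ = httpClient
}
```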
91
vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
|||
// Copyright 2022 Google LLC.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package util provides helper functions for the client.
|
||||
package util
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
const configFileName = "certificate_config.json"
|
||||
|
||||
// EnterpriseCertificateConfig contains parameters for initializing signer.
|
||||
type EnterpriseCertificateConfig struct {
|
||||
Libs Libs `json:"libs"`
|
||||
}
|
||||
|
||||
// Libs specifies the locations of helper libraries.
|
||||
type Libs struct {
|
||||
ECP string `json:"ecp"`
|
||||
}
|
||||
|
||||
// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable,
|
||||
// possibly due to entire config missing or missing binary path.
|
||||
var ErrConfigUnavailable = errors.New("Config is unavailable")
|
||||
|
||||
// LoadSignerBinaryPath retrieves the path of the signer binary from the config file.
|
||||
func LoadSignerBinaryPath(configFilePath string) (path string, err error) {
|
||||
jsonFile, err := os.Open(configFilePath)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return "", ErrConfigUnavailable
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
byteValue, err := io.ReadAll(jsonFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var config EnterpriseCertificateConfig
|
||||
err = json.Unmarshal(byteValue, &config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
signerBinaryPath := config.Libs.ECP
|
||||
if signerBinaryPath == "" {
|
||||
return "", ErrConfigUnavailable
|
||||
}
|
||||
return signerBinaryPath, nil
|
||||
}
|
||||
|
||||
func guessHomeDir() string {
|
||||
// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
|
||||
if v := os.Getenv("HOME"); v != "" {
|
||||
return v
|
||||
}
|
||||
// Else, fall back to user.Current:
|
||||
if u, err := user.Current(); err == nil {
|
||||
return u.HomeDir
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getDefaultConfigFileDirectory() (directory string) {
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(os.Getenv("APPDATA"), "gcloud")
|
||||
}
|
||||
return filepath.Join(guessHomeDir(), ".config/gcloud")
|
||||
}
|
||||
|
||||
// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud.
|
||||
func GetDefaultConfigFilePath() (path string) {
|
||||
return filepath.Join(getDefaultConfigFileDirectory(), configFileName)
|
||||
}
|
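LoadSignerBinaryPath only reads the libs.ecp field of certificate_config.json. Below is a self-contained sketch of that JSON shape, using local copies of the structs above; the binary path is a hypothetical example.

```go
// Sketch only: the minimal certificate_config.json shape consumed by
// LoadSignerBinaryPath. The struct definitions mirror the vendored ones above.
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type libs struct {
	ECP string `json:"ecp"` // path to the ECP signer binary
}

type enterpriseCertificateConfig struct {
	Libs libs `json:"libs"`
}

func main() {
	raw := []byte(`{"libs": {"ecp": "/usr/local/bin/ecp"}}`) // hypothetical path
	var cfg enterpriseCertificateConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatalf("parsing config: %v", err)
	}
	fmt.Println("signer binary:", cfg.Libs.ECP)
}
```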
3
vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"v2": "2.7.1"
|
||||
}
|
54
vendor/github.com/googleapis/gax-go/v2/CHANGES.md
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
|||
# Changelog
|
||||
|
||||
## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254)
|
||||
|
||||
## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6))
|
||||
* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f))
|
||||
|
||||
## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3))
|
||||
|
||||
## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23))
|
||||
|
||||
## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb))
|
||||
|
||||
## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e))
|
||||
|
||||
|
||||
### Miscellaneous Chores
|
||||
|
||||
* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719))
|
347
vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
generated
vendored
Normal file
|
@ -0,0 +1,347 @@
|
|||
// Copyright 2021, Google Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Package apierror implements a wrapper error for parsing error details from
|
||||
// API calls. Both HTTP & gRPC status errors are supported.
|
||||
package apierror
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// ErrDetails holds the google/rpc/error_details.proto messages.
|
||||
type ErrDetails struct {
|
||||
ErrorInfo *errdetails.ErrorInfo
|
||||
BadRequest *errdetails.BadRequest
|
||||
PreconditionFailure *errdetails.PreconditionFailure
|
||||
QuotaFailure *errdetails.QuotaFailure
|
||||
RetryInfo *errdetails.RetryInfo
|
||||
ResourceInfo *errdetails.ResourceInfo
|
||||
RequestInfo *errdetails.RequestInfo
|
||||
DebugInfo *errdetails.DebugInfo
|
||||
Help *errdetails.Help
|
||||
LocalizedMessage *errdetails.LocalizedMessage
|
||||
|
||||
// Unknown stores unidentifiable error details.
|
||||
Unknown []interface{}
|
||||
}
|
||||
|
||||
// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages.
|
||||
var ErrMessageNotFound = errors.New("message not found")
|
||||
|
||||
// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the
|
||||
// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type,
|
||||
// the content of the message is copied to the provided message.
|
||||
//
|
||||
// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the
|
||||
// protocol buffer type of the provided message.
|
||||
func (e ErrDetails) ExtractProtoMessage(v proto.Message) error {
|
||||
if v == nil {
|
||||
return ErrMessageNotFound
|
||||
}
|
||||
for _, elem := range e.Unknown {
|
||||
if elemProto, ok := elem.(proto.Message); ok {
|
||||
if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() {
|
||||
proto.Merge(v, elemProto)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return ErrMessageNotFound
|
||||
}
|
||||
|
||||
func (e ErrDetails) String() string {
|
||||
var d strings.Builder
|
||||
if e.ErrorInfo != nil {
|
||||
d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n",
|
||||
e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata()))
|
||||
}
|
||||
|
||||
if e.BadRequest != nil {
|
||||
v := e.BadRequest.GetFieldViolations()
|
||||
var f []string
|
||||
var desc []string
|
||||
for _, x := range v {
|
||||
f = append(f, x.GetField())
|
||||
desc = append(desc, x.GetDescription())
|
||||
}
|
||||
d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n",
|
||||
strings.Join(f, " "), strings.Join(desc, " ")))
|
||||
}
|
||||
|
||||
if e.PreconditionFailure != nil {
|
||||
v := e.PreconditionFailure.GetViolations()
|
||||
var t []string
|
||||
var s []string
|
||||
var desc []string
|
||||
for _, x := range v {
|
||||
t = append(t, x.GetType())
|
||||
s = append(s, x.GetSubject())
|
||||
desc = append(desc, x.GetDescription())
|
||||
}
|
||||
d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "),
|
||||
strings.Join(s, " "), strings.Join(desc, " ")))
|
||||
}
|
||||
|
||||
if e.QuotaFailure != nil {
|
||||
v := e.QuotaFailure.GetViolations()
|
||||
var s []string
|
||||
var desc []string
|
||||
for _, x := range v {
|
||||
s = append(s, x.GetSubject())
|
||||
desc = append(desc, x.GetDescription())
|
||||
}
|
||||
d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n",
|
||||
strings.Join(s, " "), strings.Join(desc, " ")))
|
||||
}
|
||||
|
||||
if e.RequestInfo != nil {
|
||||
d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n",
|
||||
e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData()))
|
||||
}
|
||||
|
||||
if e.ResourceInfo != nil {
|
||||
d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n",
|
||||
e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(),
|
||||
e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription()))
|
||||
|
||||
}
|
||||
if e.RetryInfo != nil {
|
||||
d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration()))
|
||||
|
||||
}
|
||||
if e.Unknown != nil {
|
||||
var s []string
|
||||
for _, x := range e.Unknown {
|
||||
s = append(s, fmt.Sprintf("%v", x))
|
||||
}
|
||||
d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " ")))
|
||||
}
|
||||
|
||||
if e.DebugInfo != nil {
|
||||
d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(),
|
||||
strings.Join(e.DebugInfo.GetStackEntries(), " ")))
|
||||
}
|
||||
if e.Help != nil {
|
||||
var desc []string
|
||||
var url []string
|
||||
for _, x := range e.Help.Links {
|
||||
desc = append(desc, x.GetDescription())
|
||||
url = append(url, x.GetUrl())
|
||||
}
|
||||
d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n",
|
||||
strings.Join(desc, " "), strings.Join(url, " ")))
|
||||
}
|
||||
if e.LocalizedMessage != nil {
|
||||
d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n",
|
||||
e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage()))
|
||||
}
|
||||
|
||||
return d.String()
|
||||
}
|
||||
|
||||
// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. It
|
||||
// implements error and Status interfaces.
|
||||
type APIError struct {
|
||||
err error
|
||||
status *status.Status
|
||||
httpErr *googleapi.Error
|
||||
details ErrDetails
|
||||
}
|
||||
|
||||
// Details presents the error details of the APIError.
|
||||
func (a *APIError) Details() ErrDetails {
|
||||
return a.details
|
||||
}
|
||||
|
||||
// Unwrap extracts the original error.
|
||||
func (a *APIError) Unwrap() error {
|
||||
return a.err
|
||||
}
|
||||
|
||||
// Error returns a readable representation of the APIError.
|
||||
func (a *APIError) Error() string {
|
||||
var msg string
|
||||
if a.httpErr != nil {
|
||||
// Truncate the googleapi.Error message because it dumps the Details in
|
||||
// an ugly way.
|
||||
msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message)
|
||||
} else if a.status != nil {
|
||||
msg = a.err.Error()
|
||||
}
|
||||
return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details))
|
||||
}
|
||||
|
||||
// GRPCStatus extracts the underlying gRPC Status error.
|
||||
// This method is necessary to fulfill the interface
|
||||
// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError.
|
||||
func (a *APIError) GRPCStatus() *status.Status {
|
||||
return a.status
|
||||
}
|
||||
|
||||
// Reason returns the reason in an ErrorInfo.
|
||||
// If ErrorInfo is nil, it returns an empty string.
|
||||
func (a *APIError) Reason() string {
|
||||
return a.details.ErrorInfo.GetReason()
|
||||
}
|
||||
|
||||
// Domain returns the domain in an ErrorInfo.
|
||||
// If ErrorInfo is nil, it returns an empty string.
|
||||
func (a *APIError) Domain() string {
|
||||
return a.details.ErrorInfo.GetDomain()
|
||||
}
|
||||
|
||||
// Metadata returns the metadata in an ErrorInfo.
|
||||
// If ErrorInfo is nil, it returns nil.
|
||||
func (a *APIError) Metadata() map[string]string {
|
||||
return a.details.ErrorInfo.GetMetadata()
|
||||
|
||||
}
|
||||
|
||||
// setDetailsFromError parses a Status error or a googleapi.Error
|
||||
// and sets status and details or httpErr and details, respectively.
|
||||
// It returns false if neither Status nor googleapi.Error can be parsed.
|
||||
// When err is a googleapi.Error, the status of the returned error will
|
||||
// be set to an Unknown error, rather than nil, since a nil code is
|
||||
// interpreted as OK in the gRPC status package.
|
||||
func (a *APIError) setDetailsFromError(err error) bool {
|
||||
st, isStatus := status.FromError(err)
|
||||
var herr *googleapi.Error
|
||||
isHTTPErr := errors.As(err, &herr)
|
||||
|
||||
switch {
|
||||
case isStatus:
|
||||
a.status = st
|
||||
a.details = parseDetails(st.Details())
|
||||
case isHTTPErr:
|
||||
a.httpErr = herr
|
||||
a.details = parseHTTPDetails(herr)
|
||||
a.status = status.New(codes.Unknown, herr.Message)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// FromError parses a Status error or a googleapi.Error and builds an
|
||||
// APIError, wrapping the provided error in the new APIError. It
|
||||
// returns false if neither Status nor googleapi.Error can be parsed.
|
||||
func FromError(err error) (*APIError, bool) {
|
||||
return ParseError(err, true)
|
||||
}
|
||||
|
||||
// ParseError parses a Status error or a googleapi.Error and builds an
|
||||
// APIError. If wrap is true, it wraps the error in the new APIError.
|
||||
// It returns false if neither Status nor googleapi.Error can be parsed.
|
||||
func ParseError(err error, wrap bool) (*APIError, bool) {
|
||||
if err == nil {
|
||||
return nil, false
|
||||
}
|
||||
ae := APIError{}
|
||||
if wrap {
|
||||
ae = APIError{err: err}
|
||||
}
|
||||
if !ae.setDetailsFromError(err) {
|
||||
return nil, false
|
||||
}
|
||||
return &ae, true
|
||||
}
|
||||
|
||||
// parseDetails accepts a slice of interface{} that should be backed by some
|
||||
// sort of proto.Message that can be cast to the google/rpc/error_details.proto
|
||||
// types.
|
||||
//
|
||||
// This is for internal use only.
|
||||
func parseDetails(details []interface{}) ErrDetails {
|
||||
var ed ErrDetails
|
||||
for _, d := range details {
|
||||
switch d := d.(type) {
|
||||
case *errdetails.ErrorInfo:
|
||||
ed.ErrorInfo = d
|
||||
case *errdetails.BadRequest:
|
||||
ed.BadRequest = d
|
||||
case *errdetails.PreconditionFailure:
|
||||
ed.PreconditionFailure = d
|
||||
case *errdetails.QuotaFailure:
|
||||
ed.QuotaFailure = d
|
||||
case *errdetails.RetryInfo:
|
||||
ed.RetryInfo = d
|
||||
case *errdetails.ResourceInfo:
|
||||
ed.ResourceInfo = d
|
||||
case *errdetails.RequestInfo:
|
||||
ed.RequestInfo = d
|
||||
case *errdetails.DebugInfo:
|
||||
ed.DebugInfo = d
|
||||
case *errdetails.Help:
|
||||
ed.Help = d
|
||||
case *errdetails.LocalizedMessage:
|
||||
ed.LocalizedMessage = d
|
||||
default:
|
||||
ed.Unknown = append(ed.Unknown, d)
|
||||
}
|
||||
}
|
||||
|
||||
return ed
|
||||
}
|
||||
|
||||
// parseHTTPDetails will convert the given googleapi.Error into the protobuf
|
||||
// representation then parse the Any values that contain the error details.
|
||||
//
|
||||
// This is for internal use only.
|
||||
func parseHTTPDetails(gae *googleapi.Error) ErrDetails {
|
||||
e := &jsonerror.Error{}
|
||||
if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil {
|
||||
// If the error body does not conform to the error schema, ignore it
|
||||
// altogether. See https://cloud.google.com/apis/design/errors#http_mapping.
|
||||
return ErrDetails{}
|
||||
}
|
||||
|
||||
// Coerce the Any messages into proto.Message then parse the details.
|
||||
details := []interface{}{}
|
||||
for _, any := range e.GetError().GetDetails() {
|
||||
m, err := any.UnmarshalNew()
|
||||
if err != nil {
|
||||
// Ignore malformed Any values.
|
||||
continue
|
||||
}
|
||||
details = append(details, m)
|
||||
}
|
||||
|
||||
return parseDetails(details)
|
||||
}
|
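A short hedged sketch of how a caller might use FromError to inspect the structured details defined above; the status error is fabricated for illustration and carries no ErrorInfo detail.

```go
// Sketch only: inspecting an API error through the apierror wrapper.
package main

import (
	"fmt"

	"github.com/googleapis/gax-go/v2/apierror"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// In practice err comes back from an API call; this one is fabricated.
	err := status.Error(codes.NotFound, "bucket not found")

	if apiErr, ok := apierror.FromError(err); ok {
		// Reason and Domain come from an attached ErrorInfo detail, if any;
		// they are empty here because the fabricated status carries none.
		fmt.Printf("code=%v reason=%q domain=%q\n",
			apiErr.GRPCStatus().Code(), apiErr.Reason(), apiErr.Domain())
		fmt.Print(apiErr.Details())
	} else {
		fmt.Println("not an API error:", err)
	}
}
```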
30
vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
# HTTP JSON Error Schema
|
||||
|
||||
The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey
|
||||
error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping.
|
||||
This package is for internal parsing logic only and should not be used in any
|
||||
other context.
|
||||
|
||||
## Regeneration
|
||||
|
||||
To regenerate the protobuf Go code you will need the following:
|
||||
|
||||
* A local copy of [googleapis], the absolute path to which should be exported to
|
||||
the environment variable `GOOGLEAPIS`
|
||||
* The protobuf compiler [protoc]
|
||||
* The Go [protobuf plugin]
|
||||
* The [goimports] tool
|
||||
|
||||
From this directory run the following command:
|
||||
```sh
|
||||
protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto
|
||||
goimports -w .
|
||||
```
|
||||
|
||||
Note: the `module` plugin option ensures the generated code is placed in this
|
||||
directory, and not in several nested directories defined by `go_package` option.
|
||||
|
||||
[googleapis]: https://github.com/googleapis/googleapis
|
||||
[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation
|
||||
[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated
|
||||
[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports
|
256
vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,256 @@
|
|||
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.0
|
||||
// protoc v3.17.3
|
||||
// source: custom_error.proto
|
||||
|
||||
package jsonerror
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Error code for `CustomError`.
|
||||
type CustomError_CustomErrorCode int32
|
||||
|
||||
const (
|
||||
// Default error.
|
||||
CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0
|
||||
// Too many foo.
|
||||
CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1
|
||||
// Not enough foo.
|
||||
CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2
|
||||
// Catastrophic error.
|
||||
CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3
|
||||
)
|
||||
|
||||
// Enum value maps for CustomError_CustomErrorCode.
|
||||
var (
|
||||
CustomError_CustomErrorCode_name = map[int32]string{
|
||||
0: "CUSTOM_ERROR_CODE_UNSPECIFIED",
|
||||
1: "TOO_MANY_FOO",
|
||||
2: "NOT_ENOUGH_FOO",
|
||||
3: "UNIVERSE_WAS_DESTROYED",
|
||||
}
|
||||
CustomError_CustomErrorCode_value = map[string]int32{
|
||||
"CUSTOM_ERROR_CODE_UNSPECIFIED": 0,
|
||||
"TOO_MANY_FOO": 1,
|
||||
"NOT_ENOUGH_FOO": 2,
|
||||
"UNIVERSE_WAS_DESTROYED": 3,
|
||||
}
|
||||
)
|
||||
|
||||
func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode {
|
||||
p := new(CustomError_CustomErrorCode)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x CustomError_CustomErrorCode) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_custom_error_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (CustomError_CustomErrorCode) Type() protoreflect.EnumType {
|
||||
return &file_custom_error_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead.
|
||||
func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) {
|
||||
return file_custom_error_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
// CustomError is an example of a custom error message which may be included
|
||||
// in an rpc status. It is not meant to reflect a standard error.
|
||||
type CustomError struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Error code specific to the custom API being invoked.
|
||||
Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"`
|
||||
// Name of the failed entity.
|
||||
Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"`
|
||||
// Message that describes the error.
|
||||
ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CustomError) Reset() {
|
||||
*x = CustomError{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_custom_error_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CustomError) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CustomError) ProtoMessage() {}
|
||||
|
||||
func (x *CustomError) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_custom_error_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CustomError.ProtoReflect.Descriptor instead.
|
||||
func (*CustomError) Descriptor() ([]byte, []int) {
|
||||
return file_custom_error_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *CustomError) GetCode() CustomError_CustomErrorCode {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED
|
||||
}
|
||||
|
||||
func (x *CustomError) GetEntity() string {
|
||||
if x != nil {
|
||||
return x.Entity
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CustomError) GetErrorMessage() string {
|
||||
if x != nil {
|
||||
return x.ErrorMessage
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_custom_error_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_custom_error_proto_rawDesc = []byte{
|
||||
0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b,
|
||||
0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63,
|
||||
0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f,
|
||||
0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75,
|
||||
0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63,
|
||||
0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65,
|
||||
0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43,
|
||||
0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52,
|
||||
0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
|
||||
0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41,
|
||||
0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f,
|
||||
0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16,
|
||||
0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53,
|
||||
0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
|
||||
0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65,
|
||||
0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_custom_error_proto_rawDescOnce sync.Once
|
||||
file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_custom_error_proto_rawDescGZIP() []byte {
|
||||
file_custom_error_proto_rawDescOnce.Do(func() {
|
||||
file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData)
|
||||
})
|
||||
return file_custom_error_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_custom_error_proto_goTypes = []interface{}{
|
||||
(CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode
|
||||
(*CustomError)(nil), // 1: error.CustomError
|
||||
}
|
||||
var file_custom_error_proto_depIdxs = []int32{
|
||||
0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_custom_error_proto_init() }
|
||||
func file_custom_error_proto_init() {
|
||||
if File_custom_error_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CustomError); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_custom_error_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_custom_error_proto_goTypes,
|
||||
DependencyIndexes: file_custom_error_proto_depIdxs,
|
||||
EnumInfos: file_custom_error_proto_enumTypes,
|
||||
MessageInfos: file_custom_error_proto_msgTypes,
|
||||
}.Build()
|
||||
File_custom_error_proto = out.File
|
||||
file_custom_error_proto_rawDesc = nil
|
||||
file_custom_error_proto_goTypes = nil
|
||||
file_custom_error_proto_depIdxs = nil
|
||||
}
|
50
vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package error;
|
||||
|
||||
option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror";
|
||||
|
||||
|
||||
// CustomError is an example of a custom error message which may be included
|
||||
// in an rpc status. It is not meant to reflect a standard error.
|
||||
message CustomError {
|
||||
|
||||
// Error code for `CustomError`.
|
||||
enum CustomErrorCode {
|
||||
// Default error.
|
||||
CUSTOM_ERROR_CODE_UNSPECIFIED = 0;
|
||||
|
||||
// Too many foo.
|
||||
TOO_MANY_FOO = 1;
|
||||
|
||||
// Not enough foo.
|
||||
NOT_ENOUGH_FOO = 2;
|
||||
|
||||
// Catastrophic error.
|
||||
UNIVERSE_WAS_DESTROYED = 3;
|
||||
|
||||
}
|
||||
|
||||
// Error code specific to the custom API being invoked.
|
||||
CustomErrorCode code = 1;
|
||||
|
||||
// Name of the failed entity.
|
||||
string entity = 2;
|
||||
|
||||
// Message that describes the error.
|
||||
string error_message = 3;
|
||||
}
|
280
vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,280 @@
|
|||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.0
|
||||
// protoc v3.15.8
|
||||
// source: apierror/internal/proto/error.proto
|
||||
|
||||
package jsonerror
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
code "google.golang.org/genproto/googleapis/rpc/code"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// The error format v2 for Google JSON REST APIs.
|
||||
// Copied from https://cloud.google.com/apis/design/errors#http_mapping.
|
||||
//
|
||||
// NOTE: This schema is not used for other wire protocols.
|
||||
type Error struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The actual error payload. The nested message structure is for backward
|
||||
// compatibility with Google API client libraries. It also makes the error
|
||||
// more readable to developers.
|
||||
Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Error) Reset() {
|
||||
*x = Error{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Error) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Error) ProtoMessage() {}
|
||||
|
||||
func (x *Error) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Error.ProtoReflect.Descriptor instead.
|
||||
func (*Error) Descriptor() ([]byte, []int) {
|
||||
return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Error) GetError() *Error_Status {
|
||||
if x != nil {
|
||||
return x.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This message has the same semantics as `google.rpc.Status`. It uses HTTP
|
||||
// status code instead of gRPC status code. It has an extra field `status`
|
||||
// for backward compatibility with Google API Client Libraries.
|
||||
type Error_Status struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The HTTP status code that corresponds to `google.rpc.Status.code`.
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
// This corresponds to `google.rpc.Status.message`.
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
// This is the enum version for `google.rpc.Status.code`.
|
||||
Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"`
|
||||
// This corresponds to `google.rpc.Status.details`.
|
||||
Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Error_Status) Reset() {
|
||||
*x = Error_Status{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Error_Status) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Error_Status) ProtoMessage() {}
|
||||
|
||||
func (x *Error_Status) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead.
|
||||
func (*Error_Status) Descriptor() ([]byte, []int) {
|
||||
return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *Error_Status) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Error_Status) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Error_Status) GetStatus() code.Code {
|
||||
if x != nil {
|
||||
return x.Status
|
||||
}
|
||||
return code.Code(0)
|
||||
}
|
||||
|
||||
func (x *Error_Status) GetDetails() []*anypb.Any {
|
||||
if x != nil {
|
||||
return x.Details
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_apierror_internal_proto_error_proto_rawDesc = []byte{
|
||||
0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
|
||||
0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e,
|
||||
0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5,
|
||||
0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
|
||||
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
|
||||
0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72,
|
||||
0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12,
|
||||
0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06,
|
||||
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06,
|
||||
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
|
||||
0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64,
|
||||
0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
|
||||
0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72,
|
||||
0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_apierror_internal_proto_error_proto_rawDescOnce sync.Once
|
||||
file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte {
|
||||
file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() {
|
||||
file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData)
|
||||
})
|
||||
return file_apierror_internal_proto_error_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_apierror_internal_proto_error_proto_goTypes = []interface{}{
|
||||
(*Error)(nil), // 0: error.Error
|
||||
(*Error_Status)(nil), // 1: error.Error.Status
|
||||
(code.Code)(0), // 2: google.rpc.Code
|
||||
(*anypb.Any)(nil), // 3: google.protobuf.Any
|
||||
}
|
||||
var file_apierror_internal_proto_error_proto_depIdxs = []int32{
|
||||
1, // 0: error.Error.error:type_name -> error.Error.Status
|
||||
2, // 1: error.Error.Status.status:type_name -> google.rpc.Code
|
||||
3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any
|
||||
3, // [3:3] is the sub-list for method output_type
|
||||
3, // [3:3] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
3, // [3:3] is the sub-list for extension extendee
|
||||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_apierror_internal_proto_error_proto_init() }
|
||||
func file_apierror_internal_proto_error_proto_init() {
|
||||
if File_apierror_internal_proto_error_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Error); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Error_Status); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_apierror_internal_proto_error_proto_goTypes,
|
||||
DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs,
|
||||
MessageInfos: file_apierror_internal_proto_error_proto_msgTypes,
|
||||
}.Build()
|
||||
File_apierror_internal_proto_error_proto = out.File
|
||||
file_apierror_internal_proto_error_proto_rawDesc = nil
|
||||
file_apierror_internal_proto_error_proto_goTypes = nil
|
||||
file_apierror_internal_proto_error_proto_depIdxs = nil
|
||||
}
|
46
vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package error;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
import "google/rpc/code.proto";
|
||||
|
||||
option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror";
|
||||
|
||||
// The error format v2 for Google JSON REST APIs.
|
||||
// Copied from https://cloud.google.com/apis/design/errors#http_mapping.
|
||||
//
|
||||
// NOTE: This schema is not used for other wire protocols.
|
||||
message Error {
|
||||
// This message has the same semantics as `google.rpc.Status`. It uses HTTP
|
||||
// status code instead of gRPC status code. It has an extra field `status`
|
||||
// for backward compatibility with Google API Client Libraries.
|
||||
message Status {
|
||||
// The HTTP status code that corresponds to `google.rpc.Status.code`.
|
||||
int32 code = 1;
|
||||
// This corresponds to `google.rpc.Status.message`.
|
||||
string message = 2;
|
||||
// This is the enum version for `google.rpc.Status.code`.
|
||||
google.rpc.Code status = 4;
|
||||
// This corresponds to `google.rpc.Status.details`.
|
||||
repeated google.protobuf.Any details = 5;
|
||||
}
|
||||
// The actual error payload. The nested message structure is for backward
|
||||
// compatibility with Google API client libraries. It also makes the error
|
||||
// more readable to developers.
|
||||
Status error = 1;
|
||||
}
|
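For reference, the wire payload this schema models looks like the sketch below; apierror's parseHTTPDetails unmarshals googleapi.Error bodies of this shape. The reason and domain values are illustrative, not taken from the vendored code.

```go
// Sketch only: an example HTTP-JSON error body matching the Error/Status
// schema above, per https://cloud.google.com/apis/design/errors#http_mapping.
// Field values are illustrative.
package main

import "fmt"

func main() {
	const body = `{
  "error": {
    "code": 403,
    "message": "The caller does not have permission",
    "status": "PERMISSION_DENIED",
    "details": [
      {
        "@type": "type.googleapis.com/google.rpc.ErrorInfo",
        "reason": "IAM_PERMISSION_DENIED",
        "domain": "iam.googleapis.com"
      }
    ]
  }
}`
	fmt.Println(body)
}
```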
101
vendor/github.com/googleapis/gax-go/v2/call_option.go
generated
vendored
|
@ -30,9 +30,11 @@
|
|||
package gax
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
@ -47,7 +49,7 @@ type CallOption interface {
|
|||
|
||||
// Retryer is used by Invoke to determine retry behavior.
|
||||
type Retryer interface {
|
||||
// Retry reports whether a request should be retriedand how long to pause before retrying
|
||||
// Retry reports whether a request should be retried and how long to pause before retrying
|
||||
// if the previous attempt returned with err. Invoke never calls Retry with nil error.
|
||||
Retry(err error) (pause time.Duration, shouldRetry bool)
|
||||
}
|
||||
|
@ -63,6 +65,31 @@ func WithRetry(fn func() Retryer) CallOption {
|
|||
return retryerOption(fn)
|
||||
}
|
||||
|
||||
// OnErrorFunc returns a Retryer that retries if and only if the previous attempt
|
||||
// returns an error that satisfies shouldRetry.
|
||||
//
|
||||
// Pause times between retries are specified by bo. bo is only used for its
|
||||
// parameters; each Retryer has its own copy.
|
||||
func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer {
|
||||
return &errorRetryer{
|
||||
shouldRetry: shouldRetry,
|
||||
backoff: bo,
|
||||
}
|
||||
}
|
||||
|
||||
type errorRetryer struct {
|
||||
backoff Backoff
|
||||
shouldRetry func(err error) bool
|
||||
}
|
||||
|
||||
func (r *errorRetryer) Retry(err error) (time.Duration, bool) {
|
||||
if r.shouldRetry(err) {
|
||||
return r.backoff.Pause(), true
|
||||
}
|
||||
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// OnCodes returns a Retryer that retries if and only if
|
||||
// the previous attempt returns a GRPC error whose error code is stored in cc.
|
||||
// Pause times between retries are specified by bo.
|
||||
|
@ -94,22 +121,60 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) {
|
|||
return 0, false
|
||||
}
|
||||
|
||||
// Backoff implements exponential backoff.
|
||||
// The wait time between retries is a random value between 0 and the "retry envelope".
|
||||
// The envelope starts at Initial and increases by the factor of Multiplier every retry,
|
||||
// but is capped at Max.
|
||||
// OnHTTPCodes returns a Retryer that retries if and only if
|
||||
// the previous attempt returns a googleapi.Error whose status code is stored in
|
||||
// cc. Pause times between retries are specified by bo.
|
||||
//
|
||||
// bo is only used for its parameters; each Retryer has its own copy.
|
||||
func OnHTTPCodes(bo Backoff, cc ...int) Retryer {
|
||||
codes := make(map[int]bool, len(cc))
|
||||
for _, c := range cc {
|
||||
codes[c] = true
|
||||
}
|
||||
|
||||
return &httpRetryer{
|
||||
backoff: bo,
|
||||
codes: codes,
|
||||
}
|
||||
}
|
||||
|
||||
type httpRetryer struct {
|
||||
backoff Backoff
|
||||
codes map[int]bool
|
||||
}
|
||||
|
||||
func (r *httpRetryer) Retry(err error) (time.Duration, bool) {
|
||||
var gerr *googleapi.Error
|
||||
if !errors.As(err, &gerr) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
if r.codes[gerr.Code] {
|
||||
return r.backoff.Pause(), true
|
||||
}
|
||||
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Backoff implements exponential backoff. The wait time between retries is a
|
||||
// random value between 0 and the "retry period" - the time between retries. The
|
||||
// retry period starts at Initial and increases by the factor of Multiplier
|
||||
// every retry, but is capped at Max.
|
||||
//
|
||||
// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should
|
||||
// be built on top of Backoff.
|
||||
type Backoff struct {
|
||||
// Initial is the initial value of the retry envelope, defaults to 1 second.
|
||||
// Initial is the initial value of the retry period, defaults to 1 second.
|
||||
Initial time.Duration
|
||||
|
||||
// Max is the maximum value of the retry envelope, defaults to 30 seconds.
|
||||
// Max is the maximum value of the retry period, defaults to 30 seconds.
|
||||
Max time.Duration
|
||||
|
||||
// Multiplier is the factor by which the retry envelope increases.
|
||||
// Multiplier is the factor by which the retry period increases.
|
||||
// It should be greater than 1 and defaults to 2.
|
||||
Multiplier float64
|
||||
|
||||
// cur is the current retry envelope
|
||||
// cur is the current retry period.
|
||||
cur time.Duration
|
||||
}
|
||||
|
||||
|
@@ -145,6 +210,21 @@ func (o grpcOpt) Resolve(s *CallSettings) {
    s.GRPC = o
}

type pathOpt struct {
    p string
}

func (p pathOpt) Resolve(s *CallSettings) {
    s.Path = p.p
}

// WithPath applies a Path override to the HTTP-based APICall.
//
// This is for internal use only.
func WithPath(p string) CallOption {
    return &pathOpt{p: p}
}

// WithGRPCOptions allows passing gRPC call options during client creation.
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
    return grpcOpt(append([]grpc.CallOption(nil), opt...))
@@ -158,4 +238,7 @@ type CallSettings struct {

    // CallOptions to be forwarded to GRPC.
    GRPC []grpc.CallOption

    // Path is an HTTP override for an APICall.
    Path string
}
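For illustration only (not part of the vendored diff): how CallOption values such as WithGRPCOptions fold into the CallSettings fields shown above. The resolve loop mirrors what the invoke path is assumed to do internally; grpc.WaitForReady is just a sample per-call gRPC option.

package main

import (
    "fmt"

    gax "github.com/googleapis/gax-go/v2"
    "google.golang.org/grpc"
)

func main() {
    opts := []gax.CallOption{
        // Forward a gRPC per-call option; WithPath is internal-only and
        // would normally be set by generated HTTP clients instead.
        gax.WithGRPCOptions(grpc.WaitForReady(true)),
    }

    var settings gax.CallSettings
    for _, opt := range opts {
        opt.Resolve(&settings)
    }
    fmt.Println(len(settings.GRPC)) // 1: the forwarded grpc.CallOption
}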

112
vendor/github.com/googleapis/gax-go/v2/content_type.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
// Copyright 2022, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package gax

import (
    "io"
    "io/ioutil"
    "net/http"
)

const sniffBuffSize = 512

func newContentSniffer(r io.Reader) *contentSniffer {
    return &contentSniffer{r: r}
}

// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
type contentSniffer struct {
    r     io.Reader
    start []byte // buffer for the sniffed bytes.
    err   error  // set to any error encountered while reading bytes to be sniffed.

    ctype   string // set on first sniff.
    sniffed bool   // set to true on first sniff.
}

func (cs *contentSniffer) Read(p []byte) (n int, err error) {
    // Ensure that the content type is sniffed before any data is consumed from Reader.
    _, _ = cs.ContentType()

    if len(cs.start) > 0 {
        n := copy(p, cs.start)
        cs.start = cs.start[n:]
        return n, nil
    }

    // We may have read some bytes into start while sniffing, even if the read ended in an error.
    // We should first return those bytes, then the error.
    if cs.err != nil {
        return 0, cs.err
    }

    // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
    return cs.r.Read(p)
}

// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
func (cs *contentSniffer) ContentType() (string, bool) {
    if cs.sniffed {
        return cs.ctype, cs.ctype != ""
    }
    cs.sniffed = true
    // If ReadAll hits EOF, it returns err==nil.
    cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))

    // Don't try to detect the content type based on possibly incomplete data.
    if cs.err != nil {
        return "", false
    }

    cs.ctype = http.DetectContentType(cs.start)
    return cs.ctype, true
}

// DetermineContentType determines the content type of the supplied reader.
// The content of media will be sniffed to determine the content type.
// After calling DetectContentType the caller must not perform further reads on
// media, but rather read from the Reader that is returned.
func DetermineContentType(media io.Reader) (io.Reader, string) {
    // For backwards compatibility, allow clients to set content
    // type by providing a ContentTyper for media.
    // Note: This is an anonymous interface definition copied from googleapi.ContentTyper.
    if typer, ok := media.(interface {
        ContentType() string
    }); ok {
        return media, typer.ContentType()
    }

    sniffer := newContentSniffer(media)
    if ctype, ok := sniffer.ContentType(); ok {
        return sniffer, ctype
    }
    // If content type could not be sniffed, reads from sniffer will eventually fail with an error.
    return sniffer, ""
}
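To round out the new file (illustration only, not part of the vendored diff): typical use of DetermineContentType when building an upload request. The input file name and URL are hypothetical.

package main

import (
    "fmt"
    "log"
    "net/http"
    "os"

    gax "github.com/googleapis/gax-go/v2"
)

func main() {
    f, err := os.Open("payload.bin") // hypothetical input file
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // body replaces f for all further reads; ctype is sniffed from the
    // first 512 bytes (or taken from a ContentType() method if f had one).
    body, ctype := gax.DetermineContentType(f)

    req, err := http.NewRequest(http.MethodPost, "https://example.com/upload", body)
    if err != nil {
        log.Fatal(err)
    }
    if ctype != "" {
        req.Header.Set("Content-Type", ctype)
    }
    fmt.Println("Content-Type:", ctype)
}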