forked from TrueCloudLab/rclone
vendor: update all dependencies
commit 613a9bb86b (parent 8190a81201)
448 changed files with 62205 additions and 13395 deletions
vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go (generated, vendored): 2 changes

@@ -61,6 +61,8 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
v.IPRange.String(),
string(v.Protocol),
v.Version,
resource,
"", // signed timestamp, @TODO add for snapshot sas feature
v.CacheControl, // rscc
v.ContentDisposition, // rscd
v.ContentEncoding, // rsce
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go (generated, vendored): 11 changes

@@ -69,6 +69,17 @@ func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
transactionalMD5, nil, transactionalMD5, ac.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

type AppendBlobAccessConditions struct {
ModifiedAccessConditions
LeaseAccessConditions
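A minimal usage sketch for the new AppendBlockFromURL wrapper above (not part of the diff): it appends the first 1 MiB of a source blob onto a destination append blob entirely server side. The package name, blob URLs, and SAS placeholders are hypothetical; the pipeline is assumed to come from azblob.NewPipeline.

package blobexamples

import (
	"context"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
	"github.com/Azure/azure-storage-blob-go/azblob"
)

// appendFromSource appends bytes [0, 1 MiB) of srcBlobURL to the append blob at
// dstBlobURL. Both URLs must already carry whatever authorization the service
// needs (for example a SAS token), since the copy happens server side.
func appendFromSource(ctx context.Context, p pipeline.Pipeline, srcBlobURL, dstBlobURL string) error {
	src, err := url.Parse(srcBlobURL)
	if err != nil {
		return err
	}
	dst, err := url.Parse(dstBlobURL)
	if err != nil {
		return err
	}
	appendBlob := azblob.NewAppendBlobURL(*dst, p)

	// Zero-value access conditions and no transactional MD5 keep the sketch small.
	_, err = appendBlob.AppendBlockFromURL(ctx, *src, 0, 1024*1024, azblob.AppendBlobAccessConditions{}, nil)
	return err
}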
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go (generated, vendored): 14 changes

@@ -72,6 +72,20 @@ func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.Rea
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
// The sourceOffset specifies the start offset of source data to copy from.
// The destOffset specifies the start offset of data in page blob will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesFromURLResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, ac.LeaseAccessConditions.pointers(),
ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
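A matching sketch for UploadPagesFromURL (not part of the diff): it copies a 512-byte-aligned range from a source blob URL into a page blob. Names and URLs are placeholders; the pipeline is assumed to be built elsewhere.

package blobexamples

import (
	"context"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
	"github.com/Azure/azure-storage-blob-go/azblob"
)

// copyPagesFromSource writes count bytes (a multiple of 512) taken from
// srcBlobURL at srcOffset into the page blob at dstBlobURL at dstOffset.
func copyPagesFromSource(ctx context.Context, p pipeline.Pipeline, srcBlobURL, dstBlobURL string, srcOffset, dstOffset, count int64) error {
	src, err := url.Parse(srcBlobURL)
	if err != nil {
		return err
	}
	dst, err := url.Parse(dstBlobURL)
	if err != nil {
		return err
	}
	pageBlob := azblob.NewPageBlobURL(*dst, p)

	// Zero-value access conditions and no transactional MD5 for brevity.
	_, err = pageBlob.UploadPagesFromURL(ctx, *src, srcOffset, dstOffset, count, azblob.PageBlobAccessConditions{}, nil)
	return err
}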
vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go (generated, vendored): 2 changes

@@ -1,3 +1,3 @@
package azblob

const serviceLibVersion = "0.5"
const serviceLibVersion = "0.6"
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go (generated, vendored): 2 changes

@@ -1,4 +1,4 @@
// +build linux darwin freebsd openbsd netbsd
// +build linux darwin freebsd openbsd netbsd dragonfly

package azblob
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go (generated, vendored): 10 changes

@@ -17,6 +17,9 @@ type PipelineOptions struct {

// Telemetry configures the built-in telemetry policy behavior.
Telemetry TelemetryOptions

// HTTPSender configures the sender of HTTP requests
HTTPSender pipeline.Factory
}

// NewPipeline creates a Pipeline using the specified credentials and options.

@@ -35,8 +38,9 @@ func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
f = append(f, c)
}
f = append(f,
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
NewRequestLogPolicyFactory(o.RequestLog))
NewRequestLogPolicyFactory(o.RequestLog),
pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked

return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log})

return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
}
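A sketch of what the new PipelineOptions.HTTPSender hook enables (not part of the diff): routing all requests through a caller-supplied *http.Client. It assumes the pipeline.FactoryFunc and pipeline.NewHTTPResponse helpers from azure-pipeline-go; if those differ in your version, any pipeline.Factory that produces the final sending policy plays the same role.

package blobexamples

import (
	"context"
	"net/http"

	"github.com/Azure/azure-pipeline-go/pipeline"
	"github.com/Azure/azure-storage-blob-go/azblob"
)

// newPipelineWithCustomSender builds an azblob pipeline whose terminal policy
// sends requests with the supplied HTTP client instead of the default sender.
func newPipelineWithCustomSender(client *http.Client) pipeline.Pipeline {
	sender := pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
			resp, err := client.Do(request.WithContext(ctx))
			if err != nil {
				return nil, err
			}
			return pipeline.NewHTTPResponse(resp), nil
		}
	})
	return azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{
		HTTPSender: sender, // field added in this vendored version
	})
}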
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go (generated, vendored): 6 changes

@@ -120,7 +120,8 @@ func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never
}

// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
if delay > o.MaxRetryDelay {
delay = o.MaxRetryDelay
}

@@ -157,7 +158,8 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
time.Sleep(delay) // The 1st try returns 0 delay
} else {
delay := time.Second * time.Duration(rand.Float32()/2+0.8)
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
time.Sleep(delay) // Delay with some jitter before trying secondary
}
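A standalone illustration of the jitter change above (not from the vendored code): both expressions pick a delay in roughly [0.8, 1.3) times the base, but the new form scales the time.Duration directly instead of round-tripping through Seconds(), which is the cast/rounding concern referenced via golang/go#20757.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	base := 2 * time.Second

	// Old form: convert to float seconds, jitter, convert back.
	oldDelay := time.Duration(base.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second))

	// New form from the diff: jitter the Duration (an int64 of nanoseconds) directly.
	newDelay := time.Duration(float32(base) * (rand.Float32()/2 + 0.8))

	fmt.Println(oldDelay, newDelay) // e.g. 1.9s 2.4s, both within [1.6s, 2.6s)
}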
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go (generated, vendored): 105 changes
|
@ -6,14 +6,13 @@ package azblob
|
|||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
|
||||
|
@ -123,6 +122,108 @@ func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pip
|
|||
return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// AppendBlockFromURL the Append Block operation commits a new block of data to the end of an existing append blob
|
||||
// where the contents are read from a source url. The Append Block operation is permitted only if the blob was created
|
||||
// with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
|
||||
//
|
||||
// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of
|
||||
// source data in the specified range. sourceContentMD5 is specify the md5 calculated for the range of bytes that must
|
||||
// be read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
|
||||
// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
|
||||
// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
|
||||
// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
|
||||
// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
|
||||
// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
|
||||
// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
|
||||
// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
|
||||
// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
|
||||
// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
|
||||
// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
|
||||
// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
|
||||
// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
|
||||
// logs when storage analytics logging is enabled.
|
||||
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*AppendBlobAppendBlockFromURLResponse), err
|
||||
}
|
||||
|
||||
// appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
|
||||
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "appendblock")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||
if sourceRange != nil {
|
||||
req.Header.Set("x-ms-source-range", *sourceRange)
|
||||
}
|
||||
if sourceContentMD5 != nil {
|
||||
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||
}
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
if transactionalContentMD5 != nil {
|
||||
req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
|
||||
}
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if maxSize != nil {
|
||||
req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10))
|
||||
}
|
||||
if appendPosition != nil {
|
||||
req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// appendBlockFromURLResponder handles the response to the AppendBlockFromURL request.
|
||||
func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// Create the Create Append Blob operation creates a new append blob.
|
||||
//
|
||||
// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
|
||||
|
|
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go (generated, vendored): 2 changes

@@ -11,7 +11,7 @@ import (

const (
// ServiceVersion specifies the version of the operations used in this package.
ServiceVersion = "2018-03-28"
ServiceVersion = "2018-11-09"
)

// managementClient is the base client for Azblob.
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go (generated, vendored): 194 changes
|
@ -654,6 +654,103 @@ func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) err
|
|||
return d.DecodeElement(ap2, &start)
|
||||
}
|
||||
|
||||
// AppendBlobAppendBlockFromURLResponse ...
|
||||
type AppendBlobAppendBlockFromURLResponse struct {
|
||||
rawResponse *http.Response
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) Response() *http.Response {
|
||||
return ababfur.rawResponse
|
||||
}
|
||||
|
||||
// StatusCode returns the HTTP status code of the response, e.g. 200.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) StatusCode() int {
|
||||
return ababfur.rawResponse.StatusCode
|
||||
}
|
||||
|
||||
// Status returns the HTTP status message of the response, e.g. "200 OK".
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) Status() string {
|
||||
return ababfur.rawResponse.Status
|
||||
}
|
||||
|
||||
// BlobAppendOffset returns the value for header x-ms-blob-append-offset.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) BlobAppendOffset() string {
|
||||
return ababfur.rawResponse.Header.Get("x-ms-blob-append-offset")
|
||||
}
|
||||
|
||||
// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) BlobCommittedBlockCount() int32 {
|
||||
s := ababfur.rawResponse.Header.Get("x-ms-blob-committed-block-count")
|
||||
if s == "" {
|
||||
return -1
|
||||
}
|
||||
i, err := strconv.ParseInt(s, 10, 32)
|
||||
if err != nil {
|
||||
i = 0
|
||||
}
|
||||
return int32(i)
|
||||
}
|
||||
|
||||
// ContentMD5 returns the value for header Content-MD5.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) ContentMD5() []byte {
|
||||
s := ababfur.rawResponse.Header.Get("Content-MD5")
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
b, err := base64.StdEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
b = nil
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Date returns the value for header Date.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) Date() time.Time {
|
||||
s := ababfur.rawResponse.Header.Get("Date")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
t = time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ErrorCode returns the value for header x-ms-error-code.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) ErrorCode() string {
|
||||
return ababfur.rawResponse.Header.Get("x-ms-error-code")
|
||||
}
|
||||
|
||||
// ETag returns the value for header ETag.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) ETag() ETag {
|
||||
return ETag(ababfur.rawResponse.Header.Get("ETag"))
|
||||
}
|
||||
|
||||
// LastModified returns the value for header Last-Modified.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) LastModified() time.Time {
|
||||
s := ababfur.rawResponse.Header.Get("Last-Modified")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
t = time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) RequestID() string {
|
||||
return ababfur.rawResponse.Header.Get("x-ms-request-id")
|
||||
}
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
func (ababfur AppendBlobAppendBlockFromURLResponse) Version() string {
|
||||
return ababfur.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// AppendBlobAppendBlockResponse ...
|
||||
type AppendBlobAppendBlockResponse struct {
|
||||
rawResponse *http.Response
|
||||
|
@ -4193,6 +4290,103 @@ func (pbusnr PageBlobUpdateSequenceNumberResponse) Version() string {
|
|||
return pbusnr.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// PageBlobUploadPagesFromURLResponse ...
|
||||
type PageBlobUploadPagesFromURLResponse struct {
|
||||
rawResponse *http.Response
|
||||
}
|
||||
|
||||
// Response returns the raw HTTP response object.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) Response() *http.Response {
|
||||
return pbupfur.rawResponse
|
||||
}
|
||||
|
||||
// StatusCode returns the HTTP status code of the response, e.g. 200.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) StatusCode() int {
|
||||
return pbupfur.rawResponse.StatusCode
|
||||
}
|
||||
|
||||
// Status returns the HTTP status message of the response, e.g. "200 OK".
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) Status() string {
|
||||
return pbupfur.rawResponse.Status
|
||||
}
|
||||
|
||||
// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) BlobSequenceNumber() int64 {
|
||||
s := pbupfur.rawResponse.Header.Get("x-ms-blob-sequence-number")
|
||||
if s == "" {
|
||||
return -1
|
||||
}
|
||||
i, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
i = 0
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// ContentMD5 returns the value for header Content-MD5.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) ContentMD5() []byte {
|
||||
s := pbupfur.rawResponse.Header.Get("Content-MD5")
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
b, err := base64.StdEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
b = nil
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Date returns the value for header Date.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) Date() time.Time {
|
||||
s := pbupfur.rawResponse.Header.Get("Date")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
t = time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ErrorCode returns the value for header x-ms-error-code.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) ErrorCode() string {
|
||||
return pbupfur.rawResponse.Header.Get("x-ms-error-code")
|
||||
}
|
||||
|
||||
// ETag returns the value for header ETag.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) ETag() ETag {
|
||||
return ETag(pbupfur.rawResponse.Header.Get("ETag"))
|
||||
}
|
||||
|
||||
// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) IsServerEncrypted() string {
|
||||
return pbupfur.rawResponse.Header.Get("x-ms-request-server-encrypted")
|
||||
}
|
||||
|
||||
// LastModified returns the value for header Last-Modified.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) LastModified() time.Time {
|
||||
s := pbupfur.rawResponse.Header.Get("Last-Modified")
|
||||
if s == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
t, err := time.Parse(time.RFC1123, s)
|
||||
if err != nil {
|
||||
t = time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// RequestID returns the value for header x-ms-request-id.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) RequestID() string {
|
||||
return pbupfur.rawResponse.Header.Get("x-ms-request-id")
|
||||
}
|
||||
|
||||
// Version returns the value for header x-ms-version.
|
||||
func (pbupfur PageBlobUploadPagesFromURLResponse) Version() string {
|
||||
return pbupfur.rawResponse.Header.Get("x-ms-version")
|
||||
}
|
||||
|
||||
// PageBlobUploadPagesResponse ...
|
||||
type PageBlobUploadPagesResponse struct {
|
||||
rawResponse *http.Response
|
||||
|
|
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go (generated, vendored): 101 changes
|
@ -778,3 +778,104 @@ func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipel
|
|||
resp.Response().Body.Close()
|
||||
return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
||||
// UploadPagesFromURL the Upload Pages operation writes a range of pages to a page blob where the contents are read
|
||||
// from a URL
|
||||
//
|
||||
// sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The
|
||||
// length of this range should match the ContentLength header and x-ms-range/Range destination range header.
|
||||
// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be
|
||||
// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated
|
||||
// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
|
||||
// seconds. For more information, see <a
|
||||
// href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
|
||||
// Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
|
||||
// lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only
|
||||
// on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this
|
||||
// header value to operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo
|
||||
// is specify this header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is
|
||||
// specify this header value to operate only on a blob if it has been modified since the specified date/time.
|
||||
// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
|
||||
// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
|
||||
// specify an ETag value to operate only on blobs without a matching value. requestID is provides a client-generated,
|
||||
// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is
|
||||
// enabled.
|
||||
func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) {
|
||||
if err := validate([]validation{
|
||||
{targetValue: timeout,
|
||||
constraints: []constraint{{target: "timeout", name: null, rule: false,
|
||||
chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, timeout, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.(*PageBlobUploadPagesFromURLResponse), err
|
||||
}
|
||||
|
||||
// uploadPagesFromURLPreparer prepares the UploadPagesFromURL request.
|
||||
func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
|
||||
req, err := pipeline.NewRequest("PUT", client.url, nil)
|
||||
if err != nil {
|
||||
return req, pipeline.NewError(err, "failed to create request")
|
||||
}
|
||||
params := req.URL.Query()
|
||||
if timeout != nil {
|
||||
params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
|
||||
}
|
||||
params.Set("comp", "page")
|
||||
req.URL.RawQuery = params.Encode()
|
||||
req.Header.Set("x-ms-copy-source", sourceURL)
|
||||
req.Header.Set("x-ms-source-range", sourceRange)
|
||||
if sourceContentMD5 != nil {
|
||||
req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
|
||||
}
|
||||
req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
||||
req.Header.Set("x-ms-range", rangeParameter)
|
||||
if leaseID != nil {
|
||||
req.Header.Set("x-ms-lease-id", *leaseID)
|
||||
}
|
||||
if ifSequenceNumberLessThanOrEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10))
|
||||
}
|
||||
if ifSequenceNumberLessThan != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10))
|
||||
}
|
||||
if ifSequenceNumberEqualTo != nil {
|
||||
req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10))
|
||||
}
|
||||
if ifModifiedSince != nil {
|
||||
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifUnmodifiedSince != nil {
|
||||
req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
|
||||
}
|
||||
if ifMatch != nil {
|
||||
req.Header.Set("If-Match", string(*ifMatch))
|
||||
}
|
||||
if ifNoneMatch != nil {
|
||||
req.Header.Set("If-None-Match", string(*ifNoneMatch))
|
||||
}
|
||||
req.Header.Set("x-ms-version", ServiceVersion)
|
||||
if requestID != nil {
|
||||
req.Header.Set("x-ms-client-request-id", *requestID)
|
||||
}
|
||||
req.Header.Set("x-ms-page-write", "update")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request.
|
||||
func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
|
||||
err := validateResponse(resp, http.StatusOK, http.StatusCreated)
|
||||
if resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, resp.Response().Body)
|
||||
resp.Response().Body.Close()
|
||||
return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err
|
||||
}
|
||||
|
|
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go (generated, vendored): 2 changes

@@ -5,7 +5,7 @@ package azblob

// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
return "Azure-SDK-For-Go/0.0.0 azblob/2018-03-28"
return "Azure-SDK-For-Go/0.0.0 azblob/2018-11-09"
}

// Version returns the semantic version (see http://semver.org) of the client.
vendor/github.com/aws/aws-sdk-go/NOTICE.txt (generated, vendored): 2 changes

@@ -1,3 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go (generated, vendored): 2 changes

@@ -15,7 +15,7 @@ func DeepEqual(a, b interface{}) bool {
rb := reflect.Indirect(reflect.ValueOf(b))

if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
// If the elements are both nil, and of the same type the are equal
// If the elements are both nil, and of the same type they are equal
// If they are of different types they are not equal
return reflect.TypeOf(a) == reflect.TypeOf(b)
} else if raValid != rbValid {
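A small, self-contained check of the behavior the corrected comment describes (not part of the diff): awsutil.DeepEqual treats two nil pointers of the same type as equal, and a nil pointer as unequal to a non-nil one.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	var a, b *string

	fmt.Println(awsutil.DeepEqual(a, b))                             // true: both nil, same type
	fmt.Println(awsutil.DeepEqual(aws.String("x"), aws.String("x"))) // true: dereferenced values match
	fmt.Println(awsutil.DeepEqual(a, aws.String("x")))               // false: one nil, one not
}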
vendor/github.com/aws/aws-sdk-go/aws/client/logger.go (generated, vendored): 6 changes

@@ -118,6 +118,12 @@ var LogHTTPResponseHandler = request.NamedHandler{
func logResponse(r *request.Request) {
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}

if r.HTTPResponse == nil {
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
return
}

logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
if logBody {
r.HTTPResponse.Body = &teeReaderCloser{
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go (generated, vendored): 5 changes

@@ -80,6 +80,7 @@ package stscreds

import (
"fmt"
"os"
"time"

"github.com/aws/aws-sdk-go/aws"

@@ -89,7 +90,7 @@ import (
"github.com/aws/aws-sdk-go/service/sts"
)

// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
// An error is returned if reading from stdin fails.
//
// Use this function go read MFA tokens from stdin. The function makes no attempt

@@ -102,7 +103,7 @@ import (
// Will wait forever until something is provided on the stdin.
func StdinTokenProvider() (string, error) {
var v string
fmt.Printf("Assume Role MFA token code: ")
fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
_, err := fmt.Scanln(&v)

return v, err
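A hedged sketch of how StdinTokenProvider is typically wired up (not part of the diff): with this change the MFA prompt goes to stderr, so stdout can be piped without losing it. The role ARN and MFA serial below are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example-role", func(p *stscreds.AssumeRoleProvider) {
		p.SerialNumber = aws.String("arn:aws:iam::123456789012:mfa/example-user")
		p.TokenProvider = stscreds.StdinTokenProvider // prompt now written to stderr
	})

	v, err := creds.Get()
	if err != nil {
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println("got temporary key:", v.AccessKeyID)
}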
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go (generated, vendored): 9 changes

@@ -24,8 +24,9 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {

output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
err := req.Send()

return output.Content, req.Send()
return output.Content, err
}

// GetUserData returns the userdata that was configured for the service. If

@@ -45,8 +46,9 @@ func (c *EC2Metadata) GetUserData() (string, error) {
r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
}
})
err := req.Send()

return output.Content, req.Send()
return output.Content, err
}

// GetDynamicData uses the path provided to request information from the EC2

@@ -61,8 +63,9 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {

output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
err := req.Send()

return output.Content, req.Send()
return output.Content, err
}

// GetInstanceIdentityDocument retrieves an identity document describing an
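A short caller-side sketch (not part of the diff): the fix above means the metadata request is sent once and its error returned, rather than req.Send() being invoked a second time in the return statement.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Returns the instance ID on EC2, or an error off EC2 / when IMDS is unreachable.
	id, err := svc.GetMetadata("instance-id")
	if err != nil {
		fmt.Println("not on EC2 or IMDS unreachable:", err)
		return
	}
	fmt.Println("instance id:", id)
}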
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go (generated, vendored): 3 changes

@@ -92,6 +92,9 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
svc.Handlers.Send.SwapNamed(request.NamedHandler{
Name: corehandlers.SendHandler.Name,
Fn: func(r *request.Request) {
r.HTTPResponse = &http.Response{
Header: http.Header{},
}
r.Error = awserr.New(
request.CanceledErrorCode,
"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored): 205 changes
|
@ -321,9 +321,33 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{},
|
||||
"us-east-2-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-1-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{},
|
||||
"us-west-2-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"apigateway": service{
|
||||
|
@ -383,6 +407,7 @@ var awsPartition = partition{
|
|||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
|
@ -401,6 +426,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
|
@ -414,6 +440,7 @@ var awsPartition = partition{
|
|||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
|
@ -479,6 +506,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
|
@ -873,14 +901,26 @@ var awsPartition = partition{
|
|||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"comprehendmedical": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"config": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -940,6 +980,7 @@ var awsPartition = partition{
|
|||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
|
@ -991,6 +1032,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
|
@ -1004,6 +1046,12 @@ var awsPartition = partition{
|
|||
"docdb": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-central-1": endpoint{
|
||||
Hostname: "rds.eu-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "eu-central-1",
|
||||
},
|
||||
},
|
||||
"eu-west-1": endpoint{
|
||||
Hostname: "rds.eu-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
|
@ -1191,6 +1239,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
@ -1293,11 +1342,17 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"fips": endpoint{
|
||||
Hostname: "es-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"events": service{
|
||||
|
@ -1358,10 +1413,11 @@ var awsPartition = partition{
|
|||
"fsx": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"gamelift": service{
|
||||
|
@ -1577,6 +1633,7 @@ var awsPartition = partition{
|
|||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
|
@ -1586,6 +1643,12 @@ var awsPartition = partition{
|
|||
"kms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ProdFips": endpoint{
|
||||
Hostname: "kms-fips.ca-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ca-central-1",
|
||||
},
|
||||
},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
|
@ -1625,6 +1688,22 @@ var awsPartition = partition{
|
|||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"license-manager": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"lightsail": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -1677,6 +1756,25 @@ var awsPartition = partition{
|
|||
"us-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"mediaconnect": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"mediaconvert": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -1689,6 +1787,7 @@ var awsPartition = partition{
|
|||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
|
@ -1821,6 +1920,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
@ -1840,6 +1940,18 @@ var awsPartition = partition{
|
|||
"neptune": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{
|
||||
Hostname: "rds.ap-northeast-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-northeast-1",
|
||||
},
|
||||
},
|
||||
"ap-south-1": endpoint{
|
||||
Hostname: "rds.ap-south-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-south-1",
|
||||
},
|
||||
},
|
||||
"ap-southeast-1": endpoint{
|
||||
Hostname: "rds.ap-southeast-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
|
@ -1960,6 +2072,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
|
@ -2018,6 +2131,8 @@ var awsPartition = partition{
|
|||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
|
@ -2079,11 +2194,18 @@ var awsPartition = partition{
|
|||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
|
@ -2442,6 +2564,9 @@ var awsPartition = partition{
|
|||
"eu-west-2": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"eu-west-3": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"sa-east-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
|
@ -2469,6 +2594,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
|
@ -2851,6 +2977,7 @@ var awsPartition = partition{
|
|||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
|
@ -2910,9 +3037,13 @@ var awsPartition = partition{
|
|||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
@ -3206,6 +3337,12 @@ var awscnPartition = partition{
|
|||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"gamelift": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"glacier": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
|
@ -3486,6 +3623,12 @@ var awsusgovPartition = partition{
|
|||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"athena": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"autoscaling": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -3551,6 +3694,14 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"comprehend": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"config": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -3663,6 +3814,12 @@ var awsusgovPartition = partition{
|
|||
"es": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"fips": endpoint{
|
||||
Hostname: "es-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
|
@ -3689,6 +3846,12 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"glue": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"guardduty": service{
|
||||
IsRegionalized: boxedTrue,
|
||||
Defaults: endpoint{
|
||||
|
@ -3738,6 +3901,12 @@ var awsusgovPartition = partition{
|
|||
"kms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ProdFips": endpoint{
|
||||
Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
|
@ -3973,5 +4142,17 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"waf-regional": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"workspaces": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go (generated, vendored): 1 change

@@ -38,6 +38,7 @@ var throttleCodes = map[string]struct{}{
"ThrottlingException": {},
"RequestLimitExceeded": {},
"RequestThrottled": {},
"RequestThrottledException": {},
"TooManyRequestsException": {}, // Lambda functions
"PriorRequestNotComplete": {}, // Route53
"TransactionInProgressException": {},
vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go (generated, vendored, new file): 26 additions

@@ -0,0 +1,26 @@
// +build go1.7

package session

import (
"net"
"net/http"
"time"
)

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
}
vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go (generated, vendored, new file): 22 additions

@@ -0,0 +1,22 @@
// +build !go1.6,go1.5

package session

import (
"net"
"net/http"
"time"
)

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
}
vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go (generated, vendored, new file): 23 additions

@@ -0,0 +1,23 @@
// +build !go1.7,go1.6

package session

import (
"net"
"net/http"
"time"
)

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
}
vendor/github.com/aws/aws-sdk-go/aws/session/session.go (generated, vendored): 5 changes

@@ -407,7 +407,10 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
}
}
if t == nil {
t = &http.Transport{}
// Nil transport implies `http.DefaultTransport` should be used. Since
// the SDK cannot modify, nor copy the `DefaultTransport` specifying
// the values the next closest behavior.
t = getCABundleTransport()
}

p, err := loadCertPool(bundle)
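A hedged sketch of the caller path that exercises loadCustomCABundle (not part of the diff): supplying a custom CA bundle through session.Options. The bundle path is a placeholder. With this change the SDK starts from a DefaultTransport-like transport from getCABundleTransport instead of an empty &http.Transport{}.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	f, err := os.Open("/etc/ssl/private-ca-bundle.pem") // hypothetical path
	if err != nil {
		fmt.Println("open bundle:", err)
		return
	}
	defer f.Close()

	sess, err := session.NewSessionWithOptions(session.Options{
		CustomCABundle: f, // triggers loadCustomCABundle on the session's HTTP client
	})
	if err != nil {
		fmt.Println("session:", err)
		return
	}
	_ = sess // use sess with any service client; TLS will trust the custom bundle
}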
vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored): 2 changes

@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.16.31"
const SDKVersion = "1.19.11"
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go (generated, vendored): 7 changes

@@ -155,6 +155,9 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.
return awserr.New("SerializationError", "failed to encode REST request", err)
}

name = strings.TrimSpace(name)
str = strings.TrimSpace(str)

header.Add(name, str)

return nil

@@ -170,8 +173,10 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag)
return awserr.New("SerializationError", "failed to encode REST request", err)

}
keyStr := strings.TrimSpace(key.String())
str = strings.TrimSpace(str)

header.Add(prefix+key.String(), str)
header.Add(prefix+keyStr, str)
}
return nil
}
vendor/github.com/aws/aws-sdk-go/service/s3/api.go (generated, vendored): 75 changes
|
@ -2047,7 +2047,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
|
|||
|
||||
// GetBucketLifecycle API operation for Amazon Simple Storage Service.
|
||||
//
|
||||
// Deprecated, see the GetBucketLifecycleConfiguration operation.
|
||||
// No longer used, see the GetBucketLifecycleConfiguration operation.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -2428,7 +2428,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
|
|||
|
||||
// GetBucketNotification API operation for Amazon Simple Storage Service.
|
||||
//
|
||||
// Deprecated, see the GetBucketNotificationConfiguration operation.
|
||||
// No longer used, see the GetBucketNotificationConfiguration operation.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -5287,7 +5287,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
|
|||
|
||||
// PutBucketLifecycle API operation for Amazon Simple Storage Service.
|
||||
//
|
||||
// Deprecated, see the PutBucketLifecycleConfiguration operation.
|
||||
// No longer used, see the PutBucketLifecycleConfiguration operation.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -5600,7 +5600,7 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
|
|||
|
||||
// PutBucketNotification API operation for Amazon Simple Storage Service.
|
||||
//
|
||||
// Deprecated, see the PutBucketNotificationConfiguraiton operation.
|
||||
// No longer used, see the PutBucketNotificationConfiguration operation.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -8937,7 +8937,7 @@ type CreateBucketConfiguration struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Specifies the region where the bucket will be created. If you don't specify
|
||||
// a region, the bucket will be created in US Standard.
|
||||
// a region, the bucket is created in US East (N. Virginia) Region (us-east-1).
|
||||
LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
|
||||
}
|
||||
|
||||
|
@ -11215,7 +11215,7 @@ type FilterRule struct {
|
|||
// The object key name prefix or suffix identifying one or more objects to which
|
||||
// the filtering rule applies. The maximum prefix length is 1,024 characters.
|
||||
// Overlapping prefixes and suffixes are not supported. For more information,
|
||||
// see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Name *string `type:"string" enum:"FilterRuleName"`
|
||||
|
||||
|
@ -15149,7 +15149,7 @@ type LambdaFunctionConfiguration struct {
|
|||
Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
|
||||
|
||||
// A container for object key name filtering rules. For information about key
|
||||
// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
|
@ -15332,7 +15332,7 @@ type LifecycleRule struct {
|
|||
NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
|
||||
|
||||
// Prefix identifying one or more objects to which the rule applies. This is
|
||||
// deprecated; use Filter instead.
|
||||
// No longer used; use Filter instead.
|
||||
//
|
||||
// Deprecated: Prefix has been deprecated
|
||||
Prefix *string `deprecated:"true" type:"string"`
|
||||
|
@ -17624,8 +17624,8 @@ type NoncurrentVersionExpiration struct {
|
|||
// Specifies the number of days an object is noncurrent before Amazon S3 can
|
||||
// perform the associated action. For information about the noncurrent days
|
||||
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
|
||||
// the Amazon Simple Storage Service Developer Guide.
|
||||
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
NoncurrentDays *int64 `type:"integer"`
|
||||
}
|
||||
|
||||
|
@ -17646,19 +17646,20 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers
|
|||
}
|
||||
|
||||
// Container for the transition rule that describes when noncurrent objects
|
||||
// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER
|
||||
// storage class. If your bucket is versioning-enabled (or versioning is suspended),
|
||||
// you can set this action to request that Amazon S3 transition noncurrent object
|
||||
// versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER storage
|
||||
// class at a specific period in the object's lifetime.
|
||||
// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or
|
||||
// DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
|
||||
// is suspended), you can set this action to request that Amazon S3 transition
|
||||
// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
|
||||
// GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's
|
||||
// lifetime.
|
||||
type NoncurrentVersionTransition struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Specifies the number of days an object is noncurrent before Amazon S3 can
|
||||
// perform the associated action. For information about the noncurrent days
|
||||
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
|
||||
// the Amazon Simple Storage Service Developer Guide.
|
||||
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
NoncurrentDays *int64 `type:"integer"`
|
||||
|
||||
// The class of storage used to store the object.
|
||||
|
@ -17806,7 +17807,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf
|
|||
}
|
||||
|
||||
// A container for object key name filtering rules. For information about key
|
||||
// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
type NotificationConfigurationFilter struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
@ -20199,7 +20200,8 @@ type PutObjectInput struct {
|
|||
// body cannot be determined automatically.
|
||||
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
|
||||
|
||||
// The base64-encoded 128-bit MD5 digest of the part data.
|
||||
// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
|
||||
// auto-populated when using the command from the CLI
|
||||
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
|
||||
|
||||
// A standard MIME type describing the format of the object data.
|
||||
|
@ -20638,6 +20640,7 @@ type PutObjectLockConfigurationInput struct {
|
|||
// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
|
||||
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
|
||||
|
||||
// A token to allow Object Lock to be enabled for an existing bucket.
|
||||
Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
|
||||
}
|
||||
|
||||
|
@ -21146,7 +21149,7 @@ type QueueConfiguration struct {
|
|||
Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
|
||||
|
||||
// A container for object key name filtering rules. For information about key
|
||||
// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
|
@ -22120,11 +22123,12 @@ type Rule struct {
|
|||
NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
|
||||
|
||||
// Container for the transition rule that describes when noncurrent objects
|
||||
// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER
|
||||
// storage class. If your bucket is versioning-enabled (or versioning is suspended),
|
||||
// you can set this action to request that Amazon S3 transition noncurrent object
|
||||
// versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or GLACIER storage
|
||||
// class at a specific period in the object's lifetime.
|
||||
// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or
|
||||
// DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
|
||||
// is suspended), you can set this action to request that Amazon S3 transition
|
||||
// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
|
||||
// GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's
|
||||
// lifetime.
|
||||
NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
|
||||
|
||||
// Prefix identifying one or more objects to which the rule applies.
|
||||
|
@ -22496,7 +22500,7 @@ func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
|
|||
// Amazon S3 uses this to parse object data into records. It returns only records
|
||||
// that match the specified SQL expression. You must also specify the data serialization
|
||||
// format for the response. For more information, see S3Select API Documentation
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
|
||||
// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
|
||||
type SelectObjectContentInput struct {
|
||||
_ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
|
||||
|
||||
|
@ -22534,15 +22538,15 @@ type SelectObjectContentInput struct {
|
|||
RequestProgress *RequestProgress `type:"structure"`
|
||||
|
||||
// The SSE Algorithm used to encrypt the object. For more information, see
|
||||
// Server-Side Encryption (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
// Server-Side Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
|
||||
|
||||
// The SSE Customer Key. For more information, see Server-Side Encryption (Using
|
||||
// Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
// Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
|
||||
|
||||
// The SSE Customer Key MD5. For more information, see Server-Side Encryption
|
||||
// (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
// (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
|
||||
}
|
||||
|
||||
|
@ -23347,7 +23351,7 @@ type TopicConfiguration struct {
|
|||
Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
|
||||
|
||||
// A container for object key name filtering rules. For information about key
|
||||
// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
|
@ -23537,7 +23541,7 @@ type UploadPartCopyInput struct {
|
|||
// the form bytes=first-last, where the first and last are the zero-based byte
|
||||
// offsets to copy. For example, bytes=0-9 indicates that you want to copy the
|
||||
// first ten bytes of the source. You can copy a range only if the source object
|
||||
// is greater than 5 GB.
|
||||
// is greater than 5 MB.
|
||||
CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
|
||||
|
||||
// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
|
||||
|
@ -24543,6 +24547,9 @@ const (
|
|||
|
||||
// ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value
|
||||
ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING"
|
||||
|
||||
// ObjectStorageClassDeepArchive is a ObjectStorageClass enum value
|
||||
ObjectStorageClassDeepArchive = "DEEP_ARCHIVE"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -24673,6 +24680,9 @@ const (
|
|||
|
||||
// StorageClassGlacier is a StorageClass enum value
|
||||
StorageClassGlacier = "GLACIER"
|
||||
|
||||
// StorageClassDeepArchive is a StorageClass enum value
|
||||
StorageClassDeepArchive = "DEEP_ARCHIVE"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -24711,6 +24721,9 @@ const (
|
|||
|
||||
// TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value
|
||||
TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING"
|
||||
|
||||
// TransitionStorageClassDeepArchive is a TransitionStorageClass enum value
|
||||
TransitionStorageClassDeepArchive = "DEEP_ARCHIVE"
|
||||
)
|
||||
|
||||
const (
|
||||
|
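The enum additions above make DEEP_ARCHIVE available anywhere a storage class is accepted. A hedged sketch of a PutObject call using the new constant (bucket and key are placeholders):

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// StorageClassDeepArchive is one of the constants added in this update.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:       aws.String("example-bucket"), // placeholder
		Key:          aws.String("archive/report.txt"),
		Body:         strings.NewReader("archived payload"),
		StorageClass: aws.String(s3.StorageClassDeepArchive),
	})
	if err != nil {
		log.Fatal(err)
	}
}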
|
13
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
generated
vendored
|
@ -11,6 +11,7 @@ import (
|
|||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3iface"
|
||||
|
@ -593,8 +594,18 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
|
|||
uploadID: u.uploadID,
|
||||
}
|
||||
}
|
||||
|
||||
// Create a presigned URL of the S3 Get Object in order to have parity with
|
||||
// single part upload.
|
||||
getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
|
||||
Bucket: u.in.Bucket,
|
||||
Key: u.in.Key,
|
||||
})
|
||||
getReq.Config.Credentials = credentials.AnonymousCredentials
|
||||
uploadLocation, _, _ := getReq.PresignRequest(1)
|
||||
|
||||
return &UploadOutput{
|
||||
Location: aws.StringValue(complete.Location),
|
||||
Location: uploadLocation,
|
||||
VersionID: complete.VersionId,
|
||||
UploadID: u.uploadID,
|
||||
}, nil
|
||||
|
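The hunk above replaces the CompleteMultipartUpload Location with a presigned (anonymous, never-sent) GetObject URL so multipart and single-part uploads report the same style of Location. Caller-side usage is unchanged; a minimal sketch with placeholder bucket and key:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	uploader := s3manager.NewUploader(sess)

	out, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("big/object.bin"),
		Body:   strings.NewReader("payload"), // large bodies are split into parts
	})
	if err != nil {
		log.Fatal(err)
	}
	// After this change Location is formatted the same way for single-part
	// and multipart uploads.
	fmt.Println(out.Location)
}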
|
3
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
generated
vendored
|
@ -39,7 +39,8 @@ type UploadInput struct {
|
|||
// The language the content is in.
|
||||
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
|
||||
|
||||
// The base64-encoded 128-bit MD5 digest of the part data.
|
||||
// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
|
||||
// auto-populated when using the command from the CLI
|
||||
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
|
||||
|
||||
// A standard MIME type describing the format of the object data.
|
||||
|
|
13
vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
generated
vendored
|
@ -26,11 +26,16 @@ func unmarshalError(r *request.Request) {
|
|||
// Bucket exists in a different region, and request needs
|
||||
// to be made to the correct region.
|
||||
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
|
||||
msg := fmt.Sprintf(
|
||||
"incorrect region, the bucket is not in '%s' region at endpoint '%s'",
|
||||
aws.StringValue(r.Config.Region),
|
||||
aws.StringValue(r.Config.Endpoint),
|
||||
)
|
||||
if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
|
||||
msg += fmt.Sprintf(", bucket is in '%s' region", v)
|
||||
}
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("BucketRegionError",
|
||||
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
|
||||
aws.StringValue(r.Config.Region)),
|
||||
nil),
|
||||
awserr.New("BucketRegionError", msg, nil),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
|
|
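The richer message above now names the configured endpoint and, when the response carries it, the x-amz-bucket-region header. A hedged sketch of how a caller might surface it; whether a given operation hits this 301 path depends on the request, and the "BucketRegionError" code string comes from the hunk above:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("eu-west-1")))
	svc := s3.New(sess)

	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("bucket-in-another-region"), // placeholder
		Key:    aws.String("object.txt"),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "BucketRegionError" {
		// The message now names the configured region, the endpoint, and
		// (if present) the bucket's actual region.
		fmt.Println(aerr.Message())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}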
3
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
|
@ -7,6 +7,7 @@ import (
|
|||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
|
@ -243,6 +244,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
|||
|
||||
output = &AssumeRoleWithSAMLOutput{}
|
||||
req = c.newRequest(op, input, output)
|
||||
req.Config.Credentials = credentials.AnonymousCredentials
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -425,6 +427,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||
|
||||
output = &AssumeRoleWithWebIdentityOutput{}
|
||||
req = c.newRequest(op, input, output)
|
||||
req.Config.Credentials = credentials.AnonymousCredentials
|
||||
return
|
||||
}
|
||||
|
||||
|
|
12
vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
generated
vendored
|
@ -1,12 +0,0 @@
|
|||
package sts
|
||||
|
||||
import "github.com/aws/aws-sdk-go/aws/request"
|
||||
|
||||
func init() {
|
||||
initRequest = func(r *request.Request) {
|
||||
switch r.Operation.Name {
|
||||
case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
|
||||
r.Handlers.Sign.Clear() // these operations are unsigned
|
||||
}
|
||||
}
|
||||
}
|
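With the request builders attaching credentials.AnonymousCredentials directly, the customizations.go init hook that cleared the Sign handlers (deleted above) is no longer needed. Caller-visible behaviour is unchanged; a hedged sketch with placeholder role ARN and token:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sts.New(sess)

	// AssumeRoleWithWebIdentity is an unsigned call; the request is built
	// with anonymous credentials, so no local AWS credentials are required.
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String("oidc-token-goes-here"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}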
1
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
|
@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
|||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
// x -= 0x80 << 63 // Always zero.
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
|
|
63
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import "errors"
|
||||
|
||||
// Deprecated: do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
3
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
|
@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
m1, m2 := e1.value, e2.value
|
||||
m1 := extensionAsLegacyType(e1.value)
|
||||
m2 := extensionAsLegacyType(e2.value)
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
|
|
78
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
|
@ -185,9 +185,25 @@ type Extension struct {
|
|||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
desc *ExtensionDesc
|
||||
|
||||
// value is a concrete value for the extension field. Let the type of
|
||||
// desc.ExtensionType be the "API type" and the type of Extension.value
|
||||
// be the "storage type". The API type and storage type are the same except:
|
||||
// * For scalars (except []byte), the API type uses *T,
|
||||
// while the storage type uses T.
|
||||
// * For repeated fields, the API type uses []T, while the storage type
|
||||
// uses *[]T.
|
||||
//
|
||||
// The reason for the divergence is so that the storage type more naturally
|
||||
// matches what is expected of when retrieving the values through the
|
||||
// protobuf reflection APIs.
|
||||
//
|
||||
// The value may only be populated if desc is also populated.
|
||||
value interface{}
|
||||
enc []byte
|
||||
|
||||
// enc is the raw bytes for the extension field.
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
|
@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
|||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
if extension.ExtensionType == nil {
|
||||
|
@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
|||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.value = extensionAsStorageType(v)
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
|
@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
|
|||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return errors.New("proto: bad extension value type")
|
||||
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
|
@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
|
|||
}
|
||||
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: value}
|
||||
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
|
|||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
|
||||
|
||||
// extensionAsLegacyType converts an value in the storage type as the API type.
|
||||
// See Extension.value.
|
||||
func extensionAsLegacyType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
// Represent primitive types as a pointer to the value.
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
case reflect.Ptr:
|
||||
// Represent slice types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// extensionAsStorageType converts an value in the API type as the storage type.
|
||||
// See Extension.value.
|
||||
func extensionAsStorageType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
// Represent slice types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Represent slice types as a pointer to the value.
|
||||
if rv.Type().Elem().Kind() != reflect.Uint8 {
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
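The helpers above keep the public GetExtension/SetExtension behaviour unchanged while storing scalar extension values unboxed internally. The following standalone snippet re-states the scalar branch outside the vendored package (it is an illustration, not the library code itself): callers still see the API ("legacy") pointer types such as *bool.

package main

import (
	"fmt"
	"reflect"
)

// asLegacyType mirrors what extensionAsLegacyType does for scalars:
// the storage type holds a bool, the API type handed back is *bool.
func asLegacyType(v interface{}) interface{} {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.String,
		reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64:
		p := reflect.New(rv.Type())
		p.Elem().Set(rv)
		return p.Interface()
	}
	return v
}

func main() {
	stored := true              // storage type: bool
	api := asLegacyType(stored) // API type: *bool, as GetExtension returns
	fmt.Printf("%T -> %T = %v\n", stored, api, *(api.(*bool)))
}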
|
38
vendor/github.com/golang/protobuf/proto/lib.go
generated
vendored
|
@ -341,26 +341,6 @@ type Message interface {
|
|||
ProtoMessage()
|
||||
}
|
||||
|
||||
// Stats records allocation details about the protocol buffer encoders
|
||||
// and decoders. Useful for tuning the library itself.
|
||||
type Stats struct {
|
||||
Emalloc uint64 // mallocs in encode
|
||||
Dmalloc uint64 // mallocs in decode
|
||||
Encode uint64 // number of encodes
|
||||
Decode uint64 // number of decodes
|
||||
Chit uint64 // number of cache hits
|
||||
Cmiss uint64 // number of cache misses
|
||||
Size uint64 // number of sizes
|
||||
}
|
||||
|
||||
// Set to true to enable stats collection.
|
||||
const collectStats = false
|
||||
|
||||
var stats Stats
|
||||
|
||||
// GetStats returns a copy of the global Stats structure.
|
||||
func GetStats() Stats { return stats }
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
|
@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion2 = true
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion1 = true
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
|
|
137
vendor/github.com/golang/protobuf/proto/message_set.go
generated
vendored
|
@ -36,13 +36,7 @@ package proto
|
|||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
|
@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
|
|||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(exts interface{}) ([]byte, error) {
|
||||
return marshalMessageSet(exts, false)
|
||||
}
|
||||
|
||||
// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
|
||||
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
var u marshalInfo
|
||||
siz := u.sizeMessageSet(exts)
|
||||
b := make([]byte, 0, siz)
|
||||
return u.appendMessageSet(b, exts, deterministic)
|
||||
|
||||
case map[int32]Extension:
|
||||
// This is an old-style extension map.
|
||||
// Wrap it in a new-style XXX_InternalExtensions.
|
||||
ie := XXX_InternalExtensions{
|
||||
p: &struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}{
|
||||
extensionMap: exts,
|
||||
},
|
||||
}
|
||||
|
||||
var u marshalInfo
|
||||
siz := u.sizeMessageSet(&ie)
|
||||
b := make([]byte, 0, siz)
|
||||
return u.appendMessageSet(b, &ie, deterministic)
|
||||
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
|
@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
var mu sync.Locker
|
||||
m, mu = exts.extensionsRead()
|
||||
if m != nil {
|
||||
// Keep the extensions map locked until we're done marshaling to prevent
|
||||
// races between marshaling and unmarshaling the lazily-{en,de}coded
|
||||
// values.
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
}
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
|
||||
|
||||
// Process the map in key order for deterministic output.
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
|
||||
|
||||
for i, id := range ids {
|
||||
ext := m[id]
|
||||
msd, ok := messageSetMap[id]
|
||||
if !ok {
|
||||
// Unknown type; we can't render it, so skip it.
|
||||
continue
|
||||
}
|
||||
|
||||
if i > 0 && b.Len() > 1 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
fmt.Fprintf(&b, `"[%s]":`, msd.name)
|
||||
|
||||
x := ext.value
|
||||
if x == nil {
|
||||
x = reflect.New(msd.t.Elem()).Interface()
|
||||
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := json.Marshal(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.Write(d)
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is fairly tricky, and it's not clear that it is needed.
|
||||
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
|
||||
}
|
||||
|
||||
// A global registry of types that can be used in a MessageSet.
|
||||
|
||||
var messageSetMap = make(map[int32]messageSetDesc)
|
||||
|
||||
type messageSetDesc struct {
|
||||
t reflect.Type // pointer to struct
|
||||
name string
|
||||
}
|
||||
|
||||
// RegisterMessageSetType is called from the generated code.
|
||||
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
|
||||
messageSetMap[fieldNum] = messageSetDesc{
|
||||
t: reflect.TypeOf(m),
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
|
5
vendor/github.com/golang/protobuf/proto/pointer_reflect.go
generated
vendored
|
@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
|
|||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
if deref {
|
||||
u = u.Elem()
|
||||
}
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
|
|
15
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
generated
vendored
|
@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
|
|||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
} else {
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
if deref {
|
||||
p.p = *(*unsafe.Pointer)(p.p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
|
|
31
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
|
@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties {
|
|||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return sprop
|
||||
}
|
||||
|
||||
|
@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties {
|
|||
return sprop
|
||||
}
|
||||
|
||||
type (
|
||||
oneofFuncsIface interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
oneofWrappersIface interface {
|
||||
XXX_OneofWrappers() []interface{}
|
||||
}
|
||||
)
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return prop
|
||||
}
|
||||
if collectStats {
|
||||
stats.Cmiss++
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
|
@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
var oots []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oots = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oots = m.XXX_OneofWrappers()
|
||||
}
|
||||
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||
var oots []interface{}
|
||||
_, _, _, oots = om.XXX_OneofFuncs()
|
||||
|
||||
if len(oots) > 0 {
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
|
|
45
vendor/github.com/golang/protobuf/proto/table_marshal.go
generated
vendored
|
@ -87,6 +87,7 @@ type marshalElemInfo struct {
|
|||
sizer sizer
|
||||
marshaler marshaler
|
||||
isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
|
||||
deref bool // dereference the pointer before operating on it; implies isptr
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
|
|||
|
||||
// get oneof implementers
|
||||
var oneofImplementers []interface{}
|
||||
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oneofImplementers = m.XXX_OneofWrappers()
|
||||
}
|
||||
|
||||
n := t.NumField()
|
||||
|
@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
|
|||
panic("tag is not an integer")
|
||||
}
|
||||
wt := wiretype(tags[0])
|
||||
if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
|
||||
t = t.Elem()
|
||||
}
|
||||
sizer, marshaler := typeMarshaler(t, tags, false, false)
|
||||
var deref bool
|
||||
if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
|
||||
t = reflect.PtrTo(t)
|
||||
deref = true
|
||||
}
|
||||
e = &marshalElemInfo{
|
||||
wiretag: uint64(tag)<<3 | wt,
|
||||
tagsize: SizeVarint(uint64(tag) << 3),
|
||||
sizer: sizer,
|
||||
marshaler: marshaler,
|
||||
isptr: t.Kind() == reflect.Ptr,
|
||||
deref: deref,
|
||||
}
|
||||
|
||||
// update cache
|
||||
|
@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
|
|||
|
||||
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
|
||||
fi.field = toField(f)
|
||||
fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
|
||||
fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
|
||||
fi.isPointer = true
|
||||
fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
|
||||
fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
|
||||
|
@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
|
|||
}
|
||||
}
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
|
||||
// wiretype returns the wire encoding of the type.
|
||||
func wiretype(encoding string) uint64 {
|
||||
switch encoding {
|
||||
|
@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
|
|||
for _, k := range m.MapKeys() {
|
||||
ki := k.Interface()
|
||||
vi := m.MapIndex(k).Interface()
|
||||
kaddr := toAddrPointer(&ki, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
|
||||
kaddr := toAddrPointer(&ki, false, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
|
||||
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
|
||||
n += siz + SizeVarint(uint64(siz)) + tagsize
|
||||
}
|
||||
|
@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
|
|||
for _, k := range keys {
|
||||
ki := k.Interface()
|
||||
vi := m.MapIndex(k).Interface()
|
||||
kaddr := toAddrPointer(&ki, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
|
||||
kaddr := toAddrPointer(&ki, false, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
|
||||
b = appendVarint(b, tag)
|
||||
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
|
||||
b = appendVarint(b, uint64(siz))
|
||||
|
@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
|
|||
// the last time this function was called.
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
n += ei.sizer(p, ei.tagsize)
|
||||
}
|
||||
mu.Unlock()
|
||||
|
@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
|
@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
|
@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
n += ei.sizer(p, 1) // message, tag = 3 (size=1)
|
||||
}
|
||||
mu.Unlock()
|
||||
|
@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
|
@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
|
||||
b = append(b, 1<<3|WireEndGroup)
|
||||
if !nerr.Merge(err) {
|
||||
|
@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
n += ei.sizer(p, ei.tagsize)
|
||||
}
|
||||
return n
|
||||
|
@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
|
|
74
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
|
@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
|||
u.computeUnmarshalInfo()
|
||||
}
|
||||
if u.isMessageSet {
|
||||
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
|
||||
return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
|
||||
}
|
||||
var reqMask uint64 // bitmask of required fields we've seen.
|
||||
var errLater error
|
||||
|
@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
|||
}
|
||||
|
||||
// Find any types associated with oneof fields.
|
||||
// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
|
||||
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
|
||||
if fn.IsValid() {
|
||||
res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
|
||||
for i := res.Len() - 1; i >= 0; i-- {
|
||||
v := res.Index(i) // interface{}
|
||||
tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
|
||||
typ := tptr.Elem() // Msg_X
|
||||
var oneofImplementers []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oneofImplementers = m.XXX_OneofWrappers()
|
||||
}
|
||||
for _, v := range oneofImplementers {
|
||||
tptr := reflect.TypeOf(v) // *Msg_X
|
||||
typ := tptr.Elem() // Msg_X
|
||||
|
||||
f := typ.Field(0) // oneof implementers have one field
|
||||
baseUnmarshal := fieldUnmarshaler(&f)
|
||||
tags := strings.Split(f.Tag.Get("protobuf"), ",")
|
||||
fieldNum, err := strconv.Atoi(tags[1])
|
||||
if err != nil {
|
||||
panic("protobuf tag field not an integer: " + tags[1])
|
||||
}
|
||||
var name string
|
||||
for _, tag := range tags {
|
||||
if strings.HasPrefix(tag, "name=") {
|
||||
name = strings.TrimPrefix(tag, "name=")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Find the oneof field that this struct implements.
|
||||
// Might take O(n^2) to process all of the oneofs, but who cares.
|
||||
for _, of := range oneofFields {
|
||||
if tptr.Implements(of.ityp) {
|
||||
// We have found the corresponding interface for this struct.
|
||||
// That lets us know where this struct should be stored
|
||||
// when we encounter it during unmarshaling.
|
||||
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
|
||||
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
||||
}
|
||||
f := typ.Field(0) // oneof implementers have one field
|
||||
baseUnmarshal := fieldUnmarshaler(&f)
|
||||
tags := strings.Split(f.Tag.Get("protobuf"), ",")
|
||||
fieldNum, err := strconv.Atoi(tags[1])
|
||||
if err != nil {
|
||||
panic("protobuf tag field not an integer: " + tags[1])
|
||||
}
|
||||
var name string
|
||||
for _, tag := range tags {
|
||||
if strings.HasPrefix(tag, "name=") {
|
||||
name = strings.TrimPrefix(tag, "name=")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Find the oneof field that this struct implements.
|
||||
// Might take O(n^2) to process all of the oneofs, but who cares.
|
||||
for _, of := range oneofFields {
|
||||
if tptr.Implements(of.ityp) {
|
||||
// We have found the corresponding interface for this struct.
|
||||
// That lets us know where this struct should be stored
|
||||
// when we encounter it during unmarshaling.
|
||||
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
|
||||
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Get extension ranges, if any.
|
||||
fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
|
||||
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
|
||||
if fn.IsValid() {
|
||||
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
|
||||
panic("a message with extensions, but no extensions field in " + t.Name())
|
||||
|
@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
|
|||
// If there is an error, it returns 0,0.
|
||||
func decodeVarint(b []byte) (uint64, int) {
|
||||
var x, y uint64
|
||||
if len(b) <= 0 {
|
||||
if len(b) == 0 {
|
||||
goto bad
|
||||
}
|
||||
x = uint64(b[0])
|
||||
|
|
141
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
Normal file
|
@ -0,0 +1,141 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements functions to marshal proto.Message to/from
|
||||
// google.protobuf.Any message.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
	"github.com/golang/protobuf/ptypes/any"
)

const googleApis = "type.googleapis.com/"

// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	slash := strings.LastIndex(any.TypeUrl, "/")
	if slash < 0 {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return any.TypeUrl[slash+1:], nil
}

// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
	value, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
//  var x ptypes.DynamicAny
//  if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
//  fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
	proto.Message
}

// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
	aname, err := AnyMessageName(any)
	if err != nil {
		return nil, err
	}

	t := proto.MessageType(aname)
	if t == nil {
		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
	}
	return reflect.New(t.Elem()).Interface().(proto.Message), nil
}

// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
	if d, ok := pb.(*DynamicAny); ok {
		if d.Message == nil {
			var err error
			d.Message, err = Empty(any)
			if err != nil {
				return err
			}
		}
		return UnmarshalAny(any, d.Message)
	}

	aname, err := AnyMessageName(any)
	if err != nil {
		return err
	}

	mname := proto.MessageName(pb)
	if aname != mname {
		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
	}
	return proto.Unmarshal(any.Value, pb)
}

// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
	// but it avoids scanning TypeUrl for the slash.
	if any == nil {
		return false
	}
	name := proto.MessageName(pb)
	prefix := len(any.TypeUrl) - len(name)
	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
}
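To see these helpers end to end, here is a small standalone sketch (illustration only, not part of the vendored change; it only assumes the ptypes and duration packages vendored in this commit). It packs a well-known message into an Any, checks its type with Is, and unpacks it both into a concrete message and into a DynamicAny:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message (here a well-known Duration) into an Any.
	// MarshalAny sets TypeUrl to "type.googleapis.com/<full type name>".
	src := &durpb.Duration{Seconds: 90}
	any, err := ptypes.MarshalAny(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(any.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Is reports whether the Any holds a given message type.
	fmt.Println(ptypes.Is(any, &durpb.Duration{})) // true

	// Unpack into a known destination type.
	var dst durpb.Duration
	if err := ptypes.UnmarshalAny(any, &dst); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst.Seconds) // 90

	// Or let DynamicAny allocate the right type from the registry.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(any, &dyn); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", dyn.Message) // *duration.Duration
}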
200
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,200 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/any.proto
|
||||
|
||||
package any
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// Example 4: Pack and unpack a message in Go
|
||||
//
|
||||
// foo := &pb.Foo{...}
|
||||
// any, err := ptypes.MarshalAny(foo)
|
||||
// ...
|
||||
// foo := &pb.Foo{}
|
||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
type Any struct {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Any) Reset() { *m = Any{} }
|
||||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
||||
func (*Any) ProtoMessage() {}
|
||||
func (*Any) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b53526c13ae22eb4, []int{0}
|
||||
}
|
||||
|
||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||
|
||||
func (m *Any) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Any.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Any) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Any.Merge(m, src)
|
||||
}
|
||||
func (m *Any) XXX_Size() int {
|
||||
return xxx_messageInfo_Any.Size(m)
|
||||
}
|
||||
func (m *Any) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Any.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Any proto.InternalMessageInfo
|
||||
|
||||
func (m *Any) GetTypeUrl() string {
|
||||
if m != nil {
|
||||
return m.TypeUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Any) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
|
||||
|
||||
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
||||
// 185 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
||||
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
|
||||
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
|
||||
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
|
||||
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
|
||||
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
|
||||
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
|
||||
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
|
||||
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
|
||||
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
|
||||
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
|
||||
}
|
154
vendor/github.com/golang/protobuf/ptypes/any/any.proto
generated
vendored
Normal file
|
@ -0,0 +1,154 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/any";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "AnyProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// Example 4: Pack and unpack a message in Go
|
||||
//
|
||||
// foo := &pb.Foo{...}
|
||||
// any, err := ptypes.MarshalAny(foo)
|
||||
// ...
|
||||
// foo := &pb.Foo{}
|
||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
message Any {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
string type_url = 1;
|
||||
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
bytes value = 2;
|
||||
}
|
35
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/*
Package ptypes contains code for interacting with well-known types.
*/
package ptypes
102
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
Normal file
|
@ -0,0 +1,102 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package ptypes

// This file implements conversions between google.protobuf.Duration
// and time.Duration.

import (
	"errors"
	"fmt"
	"time"

	durpb "github.com/golang/protobuf/ptypes/duration"
)

const (
	// Range of a durpb.Duration in seconds, as specified in
	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
	minSeconds = -maxSeconds
)

// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, and the range of time.Duration is about 290).
func validateDuration(d *durpb.Duration) error {
	if d == nil {
		return errors.New("duration: nil Duration")
	}
	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
		return fmt.Errorf("duration: %v: seconds out of range", d)
	}
	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
		return fmt.Errorf("duration: %v: nanos out of range", d)
	}
	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
	}
	return nil
}

// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
	if err := validateDuration(p); err != nil {
		return 0, err
	}
	d := time.Duration(p.Seconds) * time.Second
	if int64(d/time.Second) != p.Seconds {
		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
	}
	if p.Nanos != 0 {
		d += time.Duration(p.Nanos) * time.Nanosecond
		if (d < 0) != (p.Nanos < 0) {
			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
		}
	}
	return d, nil
}

// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
	nanos := d.Nanoseconds()
	secs := nanos / 1e9
	nanos -= secs * 1e9
	return &durpb.Duration{
		Seconds: secs,
		Nanos:   int32(nanos),
	}
}
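As a quick illustration of the conversion helpers above, here is a standalone sketch (not part of the vendored change; it assumes only the vendored ptypes and duration packages) that converts in both directions and shows the range check:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> durpb.Duration always succeeds: the proto range is wider.
	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	// durpb.Duration -> time.Duration validates the proto and the target range.
	d, err := ptypes.Duration(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1m30.5s

	// A Duration outside the representable range is reported as an error.
	_, err = ptypes.Duration(&durpb.Duration{Seconds: 1 << 40})
	fmt.Println(err != nil) // true
}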
161
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,161 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/duration.proto
|
||||
|
||||
package duration
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||
//
|
||||
// td = datetime.timedelta(days=3, minutes=10)
|
||||
// duration = Duration()
|
||||
// duration.FromTimedelta(td)
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Duration type is encoded as a string rather than an
|
||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||
// microsecond should be expressed in JSON format as "3.000001s".
|
||||
//
|
||||
//
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
|
||||
}
|
||||
|
||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||
|
||||
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Duration.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Duration) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Duration.Merge(m, src)
|
||||
}
|
||||
func (m *Duration) XXX_Size() int {
|
||||
return xxx_messageInfo_Duration.Size(m)
|
||||
}
|
||||
func (m *Duration) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Duration.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
||||
|
||||
func (m *Duration) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Duration) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
|
||||
|
||||
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
|
||||
// 190 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
||||
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
|
||||
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
|
||||
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
|
||||
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
|
||||
0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
|
||||
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
|
||||
0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
|
||||
0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
|
||||
0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
|
||||
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
|
||||
}
|
117
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
generated
vendored
Normal file
|
@ -0,0 +1,117 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/duration";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DurationProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||
//
|
||||
// td = datetime.timedelta(days=3, minutes=10)
|
||||
// duration = Duration()
|
||||
// duration.FromTimedelta(td)
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Duration type is encoded as a string rather than an
|
||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||
// microsecond should be expressed in JSON format as "3.000001s".
|
||||
//
|
||||
//
|
||||
message Duration {
|
||||
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
132
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
Normal file
|
@ -0,0 +1,132 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package ptypes

// This file implements operations on google.protobuf.Timestamp.

import (
	"errors"
	"fmt"
	"time"

	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

const (
	// Seconds field of the earliest valid Timestamp.
	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	minValidSeconds = -62135596800
	// Seconds field just after the latest valid Timestamp.
	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	maxValidSeconds = 253402300800
)

// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
	if ts == nil {
		return errors.New("timestamp: nil Timestamp")
	}
	if ts.Seconds < minValidSeconds {
		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
	}
	if ts.Seconds >= maxValidSeconds {
		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
	}
	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
	}
	return nil
}

// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because it corresponds to a valid
	// timestamp. Instead return whatever time.Unix gives us.
	var t time.Time
	if ts == nil {
		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
	} else {
		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
	}
	return t, validateTimestamp(ts)
}

// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *tspb.Timestamp {
	ts, err := TimestampProto(time.Now())
	if err != nil {
		panic("ptypes: time.Now() out of Timestamp range")
	}
	return ts
}

// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
	ts := &tspb.Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()),
	}
	if err := validateTimestamp(ts); err != nil {
		return nil, err
	}
	return ts, nil
}

// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
	t, err := Timestamp(ts)
	if err != nil {
		return fmt.Sprintf("(%v)", err)
	}
	return t.Format(time.RFC3339Nano)
}
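A short standalone sketch of the Timestamp helpers above (illustration only, not part of the vendored change; it assumes only the vendored ptypes and timestamp packages):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	// time.Time -> Timestamp proto; fails only outside 0001-01-01..9999-12-31.
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z

	// Timestamp converts back to a time.Time in UTC.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t)

	// An out-of-range proto yields a best-effort time plus a non-nil error.
	_, err = ptypes.Timestamp(&tspb.Timestamp{Seconds: 253402300800})
	fmt.Println(err != nil) // true
}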
179
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,179 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/timestamp.proto
|
||||
|
||||
package timestamp
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// timestamp = Timestamp()
|
||||
// timestamp.GetCurrentTime()
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Timestamp type is encoded as a string in the
|
||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
||||
// where {year} is always expressed using four digits while {month}, {day},
|
||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_292007bbfe81227e, []int{0}
|
||||
}
|
||||
|
||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||
|
||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Timestamp) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Timestamp.Merge(m, src)
|
||||
}
|
||||
func (m *Timestamp) XXX_Size() int {
|
||||
return xxx_messageInfo_Timestamp.Size(m)
|
||||
}
|
||||
func (m *Timestamp) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Timestamp.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
||||
|
||||
func (m *Timestamp) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Timestamp) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
|
||||
|
||||
var fileDescriptor_292007bbfe81227e = []byte{
|
||||
// 191 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
|
||||
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
|
||||
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
|
||||
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
|
||||
0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
|
||||
0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
|
||||
0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
|
||||
0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
|
||||
0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
|
||||
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
|
||||
}
|
135
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
generated
vendored
Normal file
|
@ -0,0 +1,135 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
//     Timestamp timestamp;
//     timestamp.set_seconds(time(NULL));
//     timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
//     struct timeval tv;
//     gettimeofday(&tv, NULL);
//
//     Timestamp timestamp;
//     timestamp.set_seconds(tv.tv_sec);
//     timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
//     FILETIME ft;
//     GetSystemTimeAsFileTime(&ft);
//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
//     Timestamp timestamp;
//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
//     long millis = System.currentTimeMillis();
//
//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
//         .setNanos((int) ((millis % 1000) * 1000000)).build();
//
// Example 5: Compute Timestamp from current time in Python.
//
//     timestamp = Timestamp()
//     timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
// ) to obtain a formatter capable of generating timestamps in this format.
//
message Timestamp {

  // Represents seconds of UTC time since Unix epoch
  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
  // 9999-12-31T23:59:59Z inclusive.
  int64 seconds = 1;

  // Non-negative fractions of a second at nanosecond resolution. Negative
  // second values with fractions must still have non-negative nanos values
  // that count forward in time. Must be from 0 to 999,999,999
  // inclusive.
  int32 nanos = 2;
}
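The comment above is the upstream documentation for the vendored `Timestamp` message. As a quick, non-normative illustration in Go, the sketch below converts a `time.Time` to and from a `Timestamp` using the `github.com/golang/protobuf/ptypes` helpers; the helper names are the upstream ones and are assumed to be available alongside this vendored type.

```go
package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/ptypes"
)

func main() {
    // Wall-clock time -> protobuf Timestamp: seconds since the Unix epoch
    // plus a non-negative nanosecond remainder, as described above.
    ts, err := ptypes.TimestampProto(time.Now())
    if err != nil {
        panic(err)
    }
    fmt.Printf("seconds=%d nanos=%d\n", ts.GetSeconds(), ts.GetNanos())

    // Timestamp -> time.Time (always UTC); RFC 3339 is the JSON encoding.
    t, err := ptypes.Timestamp(ts)
    if err != nil {
        panic(err)
    }
    fmt.Println(t.Format(time.RFC3339Nano))
}
```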
362
vendor/github.com/hashicorp/golang-lru/LICENSE
generated
vendored
Normal file
@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
161
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
package simplelru

import (
    "container/list"
    "errors"
)

// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})

// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
    size      int
    evictList *list.List
    items     map[interface{}]*list.Element
    onEvict   EvictCallback
}

// entry is used to hold a value in the evictList
type entry struct {
    key   interface{}
    value interface{}
}

// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
    if size <= 0 {
        return nil, errors.New("Must provide a positive size")
    }
    c := &LRU{
        size:      size,
        evictList: list.New(),
        items:     make(map[interface{}]*list.Element),
        onEvict:   onEvict,
    }
    return c, nil
}

// Purge is used to completely clear the cache.
func (c *LRU) Purge() {
    for k, v := range c.items {
        if c.onEvict != nil {
            c.onEvict(k, v.Value.(*entry).value)
        }
        delete(c.items, k)
    }
    c.evictList.Init()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
    // Check for existing item
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        ent.Value.(*entry).value = value
        return false
    }

    // Add new item
    ent := &entry{key, value}
    entry := c.evictList.PushFront(ent)
    c.items[key] = entry

    evict := c.evictList.Len() > c.size
    // Verify size not exceeded
    if evict {
        c.removeOldest()
    }
    return evict
}

// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        return ent.Value.(*entry).value, true
    }
    return
}

// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
    _, ok = c.items[key]
    return ok
}

// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
    var ent *list.Element
    if ent, ok = c.items[key]; ok {
        return ent.Value.(*entry).value, true
    }
    return nil, ok
}

// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) (present bool) {
    if ent, ok := c.items[key]; ok {
        c.removeElement(ent)
        return true
    }
    return false
}

// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
        kv := ent.Value.(*entry)
        return kv.key, kv.value, true
    }
    return nil, nil, false
}

// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
    ent := c.evictList.Back()
    if ent != nil {
        kv := ent.Value.(*entry)
        return kv.key, kv.value, true
    }
    return nil, nil, false
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
    keys := make([]interface{}, len(c.items))
    i := 0
    for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
        keys[i] = ent.Value.(*entry).key
        i++
    }
    return keys
}

// Len returns the number of items in the cache.
func (c *LRU) Len() int {
    return c.evictList.Len()
}

// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
    }
}

// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
    c.evictList.Remove(e)
    kv := e.Value.(*entry)
    delete(c.items, kv.key)
    if c.onEvict != nil {
        c.onEvict(kv.key, kv.value)
    }
}
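A brief usage sketch of the vendored cache above (an illustration, not part of the upstream package): keys and values are plain `interface{}` values, and the eviction callback fires whenever an entry is displaced or purged.

```go
package main

import (
    "fmt"

    "github.com/hashicorp/golang-lru/simplelru"
)

func main() {
    // Cache of at most 2 entries; the callback fires on every eviction.
    onEvict := func(key, value interface{}) {
        fmt.Printf("evicted %v=%v\n", key, value)
    }
    cache, err := simplelru.NewLRU(2, onEvict)
    if err != nil {
        panic(err)
    }

    cache.Add("a", 1)
    cache.Add("b", 2)
    cache.Get("a")    // marks "a" as most recently used
    cache.Add("c", 3) // exceeds the size, so the oldest entry ("b") is evicted

    fmt.Println(cache.Keys()) // oldest to newest: [a c]
}
```

Note that this type is deliberately not thread-safe; the top-level golang-lru package wraps it with locking.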
36
vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
package simplelru

// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
    // Adds a value to the cache, returns true if an eviction occurred and
    // updates the "recently used"-ness of the key.
    Add(key, value interface{}) bool

    // Returns key's value from the cache and
    // updates the "recently used"-ness of the key. #value, isFound
    Get(key interface{}) (value interface{}, ok bool)

    // Checks if a key exists in cache without updating the recent-ness.
    Contains(key interface{}) (ok bool)

    // Returns key's value without updating the "recently used"-ness of the key.
    Peek(key interface{}) (value interface{}, ok bool)

    // Removes a key from the cache.
    Remove(key interface{}) bool

    // Removes the oldest entry from cache.
    RemoveOldest() (interface{}, interface{}, bool)

    // Returns the oldest entry from the cache. #key, value, isFound
    GetOldest() (interface{}, interface{}, bool)

    // Returns a slice of the keys in the cache, from oldest to newest.
    Keys() []interface{}

    // Returns the number of items in the cache.
    Len() int

    // Clears all cache entries.
    Purge()
}
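Since the concrete `LRU` type in `lru.go` above exposes exactly the methods listed here, a compile-time assertion can document that relationship. This is an illustrative snippet, not part of the vendored package:

```go
package simplelru_test

import "github.com/hashicorp/golang-lru/simplelru"

// Compile-time proof that *simplelru.LRU satisfies the LRUCache interface
// declared in lru_interface.go.
var _ simplelru.LRUCache = (*simplelru.LRU)(nil)
```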
6
vendor/github.com/jlaffaye/ftp/.travis.yml
generated
vendored
@ -1,14 +1,10 @@
language: go
dist: trusty
sudo: required
dist: xenial
go:
- 1.10.x
- 1.11.x
env:
- FTP_SERVER=vsftpd
- FTP_SERVER=proftpd
before_install:
- sudo $TRAVIS_BUILD_DIR/.travis/prepare.sh "$FTP_SERVER"
- sudo sysctl net.ipv6.conf.lo.disable_ipv6=0
- go get github.com/mattn/goveralls
- go get github.com/golang/lint/golint
21
vendor/github.com/jlaffaye/ftp/README.md
generated
vendored
@ -3,6 +3,7 @@
[](https://travis-ci.org/jlaffaye/ftp)
[](https://coveralls.io/github/jlaffaye/ftp?branch=master)
[](http://goreportcard.com/report/jlaffaye/ftp)
[](http://godoc.org/github.com/jlaffaye/ftp)

A FTP client package for Go

@ -12,6 +13,22 @@ A FTP client package for Go
go get -u github.com/jlaffaye/ftp
```

## Documentation ##
## Example ##

http://godoc.org/github.com/jlaffaye/ftp
```go
c, err := ftp.DialWithOptions("ftp.example.org", DialWithTimeout(5*time.Second))
if err != nil {
    t.Fatal(err)
}

err = c.Login("anonymous", "anonymous")
if err != nil {
    t.Fatal(err)
}

// Do something with the FTP conn

if err := c.Close(); err != nil {
    t.Fatal(err)
}
```
162
vendor/github.com/jlaffaye/ftp/ftp.go
generated
vendored
@ -5,6 +5,8 @@ package ftp

import (
    "bufio"
    "context"
    "crypto/tls"
    "errors"
    "io"
    "net"
@ -25,21 +27,34 @@ const (
)

// ServerConn represents the connection to a remote FTP server.
// It should be protected from concurrent accesses.
// A single connection only supports one in-flight data connection.
// It is not safe to be called concurrently.
type ServerConn struct {
    // Do not use EPSV mode
    DisableEPSV bool
    options *dialOptions
    conn    *textproto.Conn
    host    string

    // Timezone that the server is in
    Location *time.Location

    conn    *textproto.Conn
    host    string
    timeout time.Duration
    // Server capabilities discovered at runtime
    features      map[string]string
    skipEPSV      bool
    mlstSupported bool
}

// DialOption represents an option to start a new connection with DialWithOptions
type DialOption struct {
    setup func(do *dialOptions)
}

// dialOptions contains all the options set by DialOption.setup
type dialOptions struct {
    context     context.Context
    dialer      net.Dialer
    tlsConfig   tls.Config
    conn        net.Conn
    disableEPSV bool
    location    *time.Location
}

// Entry describes a file and is returned by List().
type Entry struct {
    Name string
@ -55,41 +70,44 @@ type Response struct {
    closed bool
}

// Connect is an alias to Dial, for backward compatibility
func Connect(addr string) (*ServerConn, error) {
    return Dial(addr)
}
// DialWithOptions connects to the specified address with optional options
func DialWithOptions(addr string, options ...DialOption) (*ServerConn, error) {
    do := &dialOptions{}
    for _, option := range options {
        option.setup(do)
    }

// Dial is like DialTimeout with no timeout
func Dial(addr string) (*ServerConn, error) {
    return DialTimeout(addr, 0)
}
    if do.location == nil {
        do.location = time.UTC
    }

// DialTimeout initializes the connection to the specified ftp server address.
//
// It is generally followed by a call to Login() as most FTP commands require
// an authenticated user.
func DialTimeout(addr string, timeout time.Duration) (*ServerConn, error) {
    tconn, err := net.DialTimeout("tcp", addr, timeout)
    if err != nil {
        return nil, err
    tconn := do.conn
    if tconn == nil {
        ctx := do.context

        if ctx == nil {
            ctx = context.Background()
        }

        conn, err := do.dialer.DialContext(ctx, "tcp", addr)
        if err != nil {
            return nil, err
        }
        tconn = conn
    }

    // Use the resolved IP address in case addr contains a domain name
    // If we use the domain name, we might not resolve to the same IP.
    remoteAddr := tconn.RemoteAddr().(*net.TCPAddr)

    conn := textproto.NewConn(tconn)

    c := &ServerConn{
        conn: conn,
        host: remoteAddr.IP.String(),
        timeout: timeout,
        options:  do,
        features: make(map[string]string),
        Location: time.UTC,
        conn:     textproto.NewConn(tconn),
        host:     remoteAddr.IP.String(),
    }

    _, _, err = c.conn.ReadResponse(StatusReady)
    _, _, err := c.conn.ReadResponse(StatusReady)
    if err != nil {
        c.Quit()
        return nil, err
@ -108,6 +126,76 @@ func DialTimeout(addr string, timeout time.Duration) (*ServerConn, error) {
    return c, nil
}

// DialWithTimeout returns a DialOption that configures the ServerConn with specified timeout
func DialWithTimeout(timeout time.Duration) DialOption {
    return DialOption{func(do *dialOptions) {
        do.dialer.Timeout = timeout
    }}
}

// DialWithDialer returns a DialOption that configures the ServerConn with specified net.Dialer
func DialWithDialer(dialer net.Dialer) DialOption {
    return DialOption{func(do *dialOptions) {
        do.dialer = dialer
    }}
}

// DialWithNetConn returns a DialOption that configures the ServerConn with the underlying net.Conn
func DialWithNetConn(conn net.Conn) DialOption {
    return DialOption{func(do *dialOptions) {
        do.conn = conn
    }}
}

// DialWithDisabledEPSV returns a DialOption that configures the ServerConn with EPSV disabled
// Note that EPSV is only used when advertised in the server features.
func DialWithDisabledEPSV(disabled bool) DialOption {
    return DialOption{func(do *dialOptions) {
        do.disableEPSV = disabled
    }}
}

// DialWithLocation returns a DialOption that configures the ServerConn with specified time.Location
// The location is used to parse the dates sent by the server which are in server's timezone
func DialWithLocation(location *time.Location) DialOption {
    return DialOption{func(do *dialOptions) {
        do.location = location
    }}
}

// DialWithContext returns a DialOption that configures the ServerConn with specified context
// The context will be used for the initial connection setup
func DialWithContext(ctx context.Context) DialOption {
    return DialOption{func(do *dialOptions) {
        do.context = ctx
    }}
}

// DialWithTLS returns a DialOption that configures the ServerConn with specified TLS config
func DialWithTLS(tlsConfig tls.Config) DialOption {
    return DialOption{func(do *dialOptions) {
        do.tlsConfig = tlsConfig
    }}
}

// Connect is an alias to Dial, for backward compatibility
func Connect(addr string) (*ServerConn, error) {
    return Dial(addr)
}

// Dial is like DialTimeout with no timeout
func Dial(addr string) (*ServerConn, error) {
    return DialTimeout(addr, 0)
}

// DialTimeout initializes the connection to the specified ftp server address.
//
// It is generally followed by a call to Login() as most FTP commands require
// an authenticated user.
func DialTimeout(addr string, timeout time.Duration) (*ServerConn, error) {
    return DialWithOptions(addr, DialWithTimeout(timeout))
}

// Login authenticates the client with specified user and password.
//
// "anonymous"/"anonymous" is a common user/password scheme for FTP servers
@ -271,13 +359,13 @@ func (c *ServerConn) pasv() (host string, port int, err error) {
// getDataConnPort returns a host, port for a new data connection
// it uses the best available method to do so
func (c *ServerConn) getDataConnPort() (string, int, error) {
    if !c.DisableEPSV {
    if !c.options.disableEPSV && !c.skipEPSV {
        if port, err := c.epsv(); err == nil {
            return c.host, port, nil
        }

        // if there is an error, disable EPSV for the next attempts
        c.DisableEPSV = true
        // if there is an error, skip EPSV for the next attempts
        c.skipEPSV = true
    }

    return c.pasv()
@ -290,7 +378,7 @@ func (c *ServerConn) openDataConn() (net.Conn, error) {
        return nil, err
    }

    return net.DialTimeout("tcp", net.JoinHostPort(host, strconv.Itoa(port)), c.timeout)
    return c.options.dialer.Dial("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
}

// cmd is a helper function to execute a command and check for the expected FTP
@ -383,7 +471,7 @@ func (c *ServerConn) List(path string) (entries []*Entry, err error) {
    scanner := bufio.NewScanner(r)
    now := time.Now()
    for scanner.Scan() {
        entry, err := parser(scanner.Text(), now, c.Location)
        entry, err := parser(scanner.Text(), now, c.options.location)
        if err == nil {
            entries = append(entries, entry)
        }
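To show how the new functional options introduced in this diff fit together, here is a hedged usage sketch (the host name is made up; the option helpers are the ones defined above):

```go
package main

import (
    "context"
    "log"
    "time"

    "github.com/jlaffaye/ftp"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // Combine several DialOption helpers in one call.
    c, err := ftp.DialWithOptions("ftp.example.org:21",
        ftp.DialWithTimeout(5*time.Second), // dialer timeout
        ftp.DialWithContext(ctx),           // context for the initial connection
        ftp.DialWithLocation(time.UTC),     // timezone used to parse listing dates
        ftp.DialWithDisabledEPSV(true),     // force PASV instead of EPSV
    )
    if err != nil {
        log.Fatal(err)
    }
    defer c.Quit()

    if err := c.Login("anonymous", "anonymous"); err != nil {
        log.Fatal(err)
    }
}
```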
1
vendor/github.com/kardianos/osext/go.mod
generated
vendored
Normal file
@ -0,0 +1 @@
module github.com/kardianos/osext
6
vendor/github.com/nsf/termbox-go/README.md
generated
vendored
@ -1,5 +1,9 @@
[](http://godoc.org/github.com/nsf/termbox-go)

## IMPORTANT

This library is somewhat not maintained anymore. But I'm glad that it did what I wanted the most. It moved people away from "ncurses" mindset and these days we see both re-implementations of termbox API in various languages and even possibly better libs with similar API design. If you're looking for a Go lib that provides terminal-based user interface facilities, I've heard that https://github.com/gdamore/tcell is good (never used it myself). Also for more complicated interfaces and/or computer games I recommend you to consider using HTML-based UI. Having said that, termbox still somewhat works. In fact I'm writing this line of text right now in godit (which is a text editor written using termbox-go). So, be aware. Good luck and have a nice day.

## Termbox
Termbox is a library that provides a minimalistic API which allows the programmer to write text-based user interfaces. The library is crossplatform and has both terminal-based implementations on *nix operating systems and a winapi console based implementation for windows operating systems. The basic idea is an abstraction of the greatest common subset of features available on all major terminals and other terminal-like APIs in a minimalistic fashion. Small API means it is easy to implement, test, maintain and learn it, that's what makes the termbox a distinct library in its area.

@ -40,6 +44,6 @@ There are also some interesting projects using termbox-go:
- [cointop](https://github.com/miguelmota/cointop) is an interactive terminal based UI application for tracking cryptocurrencies.
- [pexpo](https://github.com/nnao45/pexpo) is a terminal sending ping tool written in Go.
- [jid](https://github.com/simeji/jid) is an interactive JSON drill down tool using filtering queries like jq.

### API reference
[godoc.org/github.com/nsf/termbox-go](http://godoc.org/github.com/nsf/termbox-go)
6
vendor/github.com/skratchdot/open-golang/open/exec_windows.go
generated
vendored
@ -7,7 +7,7 @@ import (
    "os/exec"
    "path/filepath"
    "strings"
    "syscall"
    // "syscall"
)

var (
@ -22,12 +22,12 @@ func cleaninput(input string) string {

func open(input string) *exec.Cmd {
    cmd := exec.Command(runDll32, cmd, input)
    cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
    //cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
    return cmd
}

func openWith(input string, appName string) *exec.Cmd {
    cmd := exec.Command("cmd", "/C", "start", "", appName, cleaninput(input))
    cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
    //cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
    return cmd
}
27
vendor/github.com/spf13/cobra/.travis.yml
generated
vendored
@ -1,22 +1,31 @@
language: go

stages:
- diff
- test

go:
- 1.10.x
- 1.11.x
- 1.12.x
- tip

matrix:
  include:
  - go: 1.10.x
  - go: 1.11.x
  - go: tip
  - go: 1.11.x
    script: diff -u <(echo -n) <(gofmt -d -s .)
  allow_failures:
  - go: tip
  include:
  - stage: diff
    go: 1.12.x
    script: diff -u <(echo -n) <(gofmt -d -s .)

before_install:
- mkdir -p bin
- curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck
- curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.6.0/shellcheck
- chmod +x bin/shellcheck
- go get -u github.com/kyoh86/richgo
script:
- PATH=$PATH:$PWD/bin go test -v ./...
- PATH=$PATH:$PWD/bin richgo test -v ./...
- go build
- if [ -z $NOVET ]; then
    diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
    diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
  fi
47
vendor/github.com/spf13/cobra/README.md
generated
vendored
@ -2,25 +2,28 @@

Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files.

Many of the most widely used Go projects are built using Cobra including:

* [Kubernetes](http://kubernetes.io/)
* [Hugo](http://gohugo.io)
* [rkt](https://github.com/coreos/rkt)
* [etcd](https://github.com/coreos/etcd)
* [Moby (former Docker)](https://github.com/moby/moby)
* [Docker (distribution)](https://github.com/docker/distribution)
* [OpenShift](https://www.openshift.com/)
* [Delve](https://github.com/derekparker/delve)
* [GopherJS](http://www.gopherjs.org/)
* [CockroachDB](http://www.cockroachlabs.com/)
* [Bleve](http://www.blevesearch.com/)
* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
* [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl)
* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
* [rclone](http://rclone.org/)
* [nehm](https://github.com/bogem/nehm)
* [Pouch](https://github.com/alibaba/pouch)
Many of the most widely used Go projects are built using Cobra, such as:
[Kubernetes](http://kubernetes.io/),
[Hugo](http://gohugo.io),
[rkt](https://github.com/coreos/rkt),
[etcd](https://github.com/coreos/etcd),
[Moby (former Docker)](https://github.com/moby/moby),
[Docker (distribution)](https://github.com/docker/distribution),
[OpenShift](https://www.openshift.com/),
[Delve](https://github.com/derekparker/delve),
[GopherJS](http://www.gopherjs.org/),
[CockroachDB](http://www.cockroachlabs.com/),
[Bleve](http://www.blevesearch.com/),
[ProjectAtomic (enterprise)](http://www.projectatomic.io/),
[Giant Swarm's gsctl](https://github.com/giantswarm/gsctl),
[Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack),
[rclone](http://rclone.org/),
[nehm](https://github.com/bogem/nehm),
[Pouch](https://github.com/alibaba/pouch),
[Istio](https://istio.io),
[Prototool](https://github.com/uber/prototool),
[mattermost-server](https://github.com/mattermost/mattermost-server),
etc.

[](https://travis-ci.org/spf13/cobra)
[](https://circleci.com/gh/spf13/cobra)
@ -389,7 +392,7 @@ The following validators are built in:
- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
- `ExactValidArgs(int)` = the command will report and error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`
- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`
- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.

An example of setting the custom validator:
@ -399,7 +402,7 @@ var cmd = &cobra.Command{
  Short: "hello",
  Args: func(cmd *cobra.Command, args []string) error {
    if len(args) < 1 {
      return errors.New("requires at least one arg")
      return errors.New("requires a color argument")
    }
    if myapp.IsValidColor(args[0]) {
      return nil
@ -459,7 +462,7 @@ Echo works a lot like print, except it has a child command.`,
}

var cmdTimes = &cobra.Command{
  Use:   "times [# times] [string to echo]",
  Use:   "times [string to echo]",
  Short: "Echo anything to the screen more times",
  Long: `echo things multiple times back to the user by providing
a count and a string.`,
7
vendor/github.com/spf13/cobra/bash_completions.go
generated
vendored
@ -199,7 +199,8 @@ __%[1]s_handle_flag()
    fi

    # skip the argument to a two word flag
    if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
    if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
        __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
        c=$((c+1))
        # if we are looking for a flags value, don't show commands
        if [[ $c -eq $cword ]]; then
@ -379,6 +380,10 @@ func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
    }
    format += "\")\n"
    buf.WriteString(fmt.Sprintf(format, name))
    if len(flag.NoOptDefVal) == 0 {
        format = "    two_word_flags+=(\"--%s\")\n"
        buf.WriteString(fmt.Sprintf(format, name))
    }
    writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
}
7
vendor/github.com/spf13/cobra/cobra.go
generated
vendored
@ -23,6 +23,7 @@ import (
    "strconv"
    "strings"
    "text/template"
    "time"
    "unicode"
)

@ -56,6 +57,12 @@ var MousetrapHelpText string = `This is a command line tool.
You need to open cmd.exe and run it from there.
`

// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
// Works only on Microsoft Windows.
var MousetrapDisplayDuration time.Duration = 5 * time.Second

// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
func AddTemplateFunc(name string, tmplFunc interface{}) {
6
vendor/github.com/spf13/cobra/command.go
generated
vendored
@ -817,13 +817,11 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
    // overriding
    c.InitDefaultHelpCmd()

    var args []string
    args := c.args

    // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
    if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
        args = os.Args[1:]
    } else {
        args = c.args
    }

    var flags []string
@ -1335,7 +1333,7 @@ func (c *Command) LocalFlags() *flag.FlagSet {
    return c.lflags
}

// InheritedFlags returns all flags which were inherited from parents commands.
// InheritedFlags returns all flags which were inherited from parent commands.
func (c *Command) InheritedFlags() *flag.FlagSet {
    c.mergePersistentFlags()
8
vendor/github.com/spf13/cobra/command_win.go
generated
vendored
@ -3,6 +3,7 @@
package cobra

import (
    "fmt"
    "os"
    "time"

@ -14,7 +15,12 @@ var preExecHookFn = preExecHook
func preExecHook(c *Command) {
    if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
        c.Print(MousetrapHelpText)
        time.Sleep(5 * time.Second)
        if MousetrapDisplayDuration > 0 {
            time.Sleep(MousetrapDisplayDuration)
        } else {
            c.Println("Press return to continue...")
            fmt.Scanln()
        }
        os.Exit(1)
    }
}
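As a rough illustration of the new knob (a sketch with a hypothetical command name, not code from this diff), an application embedding Cobra could tune or disable the explorer.exe warning like this:

```go
package main

import (
    "time"

    "github.com/spf13/cobra"
)

var rootCmd = &cobra.Command{
    Use:   "myapp", // hypothetical CLI name
    Short: "Demo of the mousetrap settings shown above",
    Run:   func(cmd *cobra.Command, args []string) {},
}

func main() {
    // Keep the warning on screen for 10s when started from explorer.exe,
    // or set MousetrapHelpText to "" to disable the check entirely.
    cobra.MousetrapDisplayDuration = 10 * time.Second
    // cobra.MousetrapHelpText = ""

    _ = rootCmd.Execute()
}
```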
13
vendor/github.com/spf13/cobra/go.mod
generated
vendored
Normal file
@ -0,0 +1,13 @@
module github.com/spf13/cobra

go 1.12

require (
    github.com/BurntSushi/toml v0.3.1 // indirect
    github.com/cpuguy83/go-md2man v1.0.10
    github.com/inconshreveable/mousetrap v1.0.0
    github.com/mitchellh/go-homedir v1.1.0
    github.com/spf13/pflag v1.0.3
    github.com/spf13/viper v1.3.2
    gopkg.in/yaml.v2 v2.2.2
)
51
vendor/github.com/spf13/cobra/go.sum
generated
vendored
Normal file
@ -0,0 +1,51 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
6
vendor/github.com/xanzy/ssh-agent/go.mod
generated
vendored
Normal file
@ -0,0 +1,6 @@
module github.com/xanzy/ssh-agent

require (
    golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2
    golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0 // indirect
)
4
vendor/github.com/xanzy/ssh-agent/go.sum
generated
vendored
Normal file
@ -0,0 +1,4 @@
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2 h1:NwxKRvbkH5MsNkvOtPZi3/3kmI8CAzs3mtv+GLQMkNo=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0 h1:bzeyCHgoAyjZjAhvTpks+qM7sdlh4cCSitmXeCEO3B4=
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=