Signed-off-by: Olivier Gambier <olivier@docker.com>
Olivier Gambier 2016-03-22 10:42:33 -07:00
parent 59401e277b
commit d1444b56e9
141 changed files with 19483 additions and 4205 deletions

vendor/google.golang.org/api/gensupport/backoff.go generated vendored Normal file

@ -0,0 +1,46 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"math/rand"
"time"
)
type BackoffStrategy interface {
// Pause returns the duration of the next pause and true if the operation should be
// retried, or false if no further retries should be attempted.
Pause() (time.Duration, bool)
// Reset restores the strategy to its initial state.
Reset()
}
// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
// The initial pause time is given by Base.
// Once the total pause time exceeds Max, Pause will indicate no further retries.
type ExponentialBackoff struct {
Base time.Duration
Max time.Duration
total time.Duration
n uint
}
func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
if eb.total > eb.Max {
return 0, false
}
// The next pause is selected randomly from [0, 2^n * Base).
d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base)))
eb.total += d
eb.n++
return d, true
}
func (eb *ExponentialBackoff) Reset() {
eb.n = 0
eb.total = 0
}
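For orientation, a minimal sketch of how a BackoffStrategy such as ExponentialBackoff can drive a retry loop. The Base/Max values and the doSomethingFlaky operation are illustrative assumptions, not taken from the generated clients:

package main

import (
	"fmt"
	"time"

	"google.golang.org/api/gensupport"
)

// doSomethingFlaky stands in for a transiently failing operation; it succeeds on the third try.
func doSomethingFlaky(attempt int) error {
	if attempt < 3 {
		return fmt.Errorf("transient failure on attempt %d", attempt)
	}
	return nil
}

func main() {
	// Illustrative strategy: start around 250ms and stop once roughly 2s of pause has accumulated.
	eb := &gensupport.ExponentialBackoff{Base: 250 * time.Millisecond, Max: 2 * time.Second}
	for attempt := 1; ; attempt++ {
		if err := doSomethingFlaky(attempt); err == nil {
			fmt.Println("succeeded on attempt", attempt)
			return
		}
		pause, retry := eb.Pause()
		if !retry {
			fmt.Println("giving up after attempt", attempt)
			return
		}
		time.Sleep(pause)
	}
}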

vendor/google.golang.org/api/gensupport/buffer.go generated vendored Normal file

@ -0,0 +1,77 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"bytes"
"io"
"google.golang.org/api/googleapi"
)
// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
type ResumableBuffer struct {
media io.Reader
chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.
err error // Any error generated when populating chunk by reading media.
// The absolute position of chunk in the underlying media.
off int64
}
func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer {
return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
}
// Chunk returns the current buffered chunk, the offset in the underlying media
// from which the chunk is drawn, and the size of the chunk.
// Successive calls to Chunk return the same chunk between calls to Next.
func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
// There may already be data in chunk if Next has not been called since the previous call to Chunk.
if rb.err == nil && len(rb.chunk) == 0 {
rb.err = rb.loadChunk()
}
return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err
}
// loadChunk will read from media into chunk, up to the capacity of chunk.
func (rb *ResumableBuffer) loadChunk() error {
bufSize := cap(rb.chunk)
rb.chunk = rb.chunk[:bufSize]
read := 0
var err error
for err == nil && read < bufSize {
var n int
n, err = rb.media.Read(rb.chunk[read:])
read += n
}
rb.chunk = rb.chunk[:read]
return err
}
// Next advances to the next chunk, which will be returned by the next call to Chunk.
// Calls to Next without a corresponding prior call to Chunk will have no effect.
func (rb *ResumableBuffer) Next() {
rb.off += int64(len(rb.chunk))
rb.chunk = rb.chunk[0:0]
}
type readerTyper struct {
io.Reader
googleapi.ContentTyper
}
// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
// If ra implements googleapi.ContentTyper, then the returned reader
// will also implement googleapi.ContentTyper, delegating to ra.
func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader {
r := io.NewSectionReader(ra, 0, size)
if typer, ok := ra.(googleapi.ContentTyper); ok {
return readerTyper{r, typer}
}
return r
}
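To make the Chunk/Next contract concrete, an illustrative sketch (the input string and the 8-byte chunk size are made up): the same chunk stays available until the caller advances with Next, and io.EOF on the final chunk signals that no more data follows.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"google.golang.org/api/gensupport"
)

func main() {
	rb := gensupport.NewResumableBuffer(strings.NewReader("hello resumable world"), 8)
	for {
		chunk, off, size, err := rb.Chunk()
		if err != nil && err != io.EOF {
			fmt.Println("read error:", err)
			return
		}
		if size > 0 {
			data, _ := ioutil.ReadAll(chunk)
			fmt.Printf("chunk at offset %d: %q\n", off, data)
		}
		if err == io.EOF {
			return // the final chunk has been consumed
		}
		rb.Next() // commit this chunk; the next Chunk call loads the following one
	}
}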

vendor/google.golang.org/api/gensupport/doc.go generated vendored Normal file

@ -0,0 +1,10 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gensupport is an internal implementation detail used by code
// generated by the google-api-go-generator tool.
//
// This package may be modified at any time without regard for backwards
// compatibility. It should not be used directly by API users.
package gensupport

vendor/google.golang.org/api/gensupport/json.go generated vendored Normal file

@ -0,0 +1,172 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"encoding/json"
"fmt"
"reflect"
"strings"
)
// MarshalJSON returns a JSON encoding of schema containing only selected fields.
// A field is selected if any of the following is true:
//   * it has a non-empty value, or
//   * its field name is present in forceSendFields and it is not a nil pointer or nil interface.
// The JSON key for each selected field is taken from the field's json: struct tag.
func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) {
if len(forceSendFields) == 0 {
return json.Marshal(schema)
}
mustInclude := make(map[string]struct{})
for _, f := range forceSendFields {
mustInclude[f] = struct{}{}
}
dataMap, err := schemaToMap(schema, mustInclude)
if err != nil {
return nil, err
}
return json.Marshal(dataMap)
}
func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) {
m := make(map[string]interface{})
s := reflect.ValueOf(schema)
st := s.Type()
for i := 0; i < s.NumField(); i++ {
jsonTag := st.Field(i).Tag.Get("json")
if jsonTag == "" {
continue
}
tag, err := parseJSONTag(jsonTag)
if err != nil {
return nil, err
}
if tag.ignore {
continue
}
v := s.Field(i)
f := st.Field(i)
if !includeField(v, f, mustInclude) {
continue
}
// nil maps are treated as empty maps.
if f.Type.Kind() == reflect.Map && v.IsNil() {
m[tag.apiName] = map[string]string{}
continue
}
// nil slices are treated as empty slices.
if f.Type.Kind() == reflect.Slice && v.IsNil() {
m[tag.apiName] = []bool{}
continue
}
if tag.stringFormat {
m[tag.apiName] = formatAsString(v, f.Type.Kind())
} else {
m[tag.apiName] = v.Interface()
}
}
return m, nil
}
// formatAsString returns a string representation of v, dereferencing it first if possible.
func formatAsString(v reflect.Value, kind reflect.Kind) string {
if kind == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
return fmt.Sprintf("%v", v.Interface())
}
// jsonTag represents a restricted version of the struct tag format used by encoding/json.
// It is used to describe the JSON encoding of fields in a Schema struct.
type jsonTag struct {
apiName string
stringFormat bool
ignore bool
}
// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
// The format of the tag must match that generated by the Schema.writeSchemaStruct method
// in the api generator.
func parseJSONTag(val string) (jsonTag, error) {
if val == "-" {
return jsonTag{ignore: true}, nil
}
var tag jsonTag
i := strings.Index(val, ",")
if i == -1 || val[:i] == "" {
return tag, fmt.Errorf("malformed json tag: %s", val)
}
tag = jsonTag{
apiName: val[:i],
}
switch val[i+1:] {
case "omitempty":
case "omitempty,string":
tag.stringFormat = true
default:
return tag, fmt.Errorf("malformed json tag: %s", val)
}
return tag, nil
}
// includeField reports whether the struct field "f" with value "v" should be included in JSON output.
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
// The regular JSON encoding of a nil pointer is "null", which means "delete this field".
// Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
// However, many fields are not pointers, so there would be no way to delete these fields.
// Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
// Deletion will be handled by a separate mechanism.
if f.Type.Kind() == reflect.Ptr && v.IsNil() {
return false
}
// The "any" type is represented as an interface{}. If this interface
// is nil, there is no reasonable representation to send. We ignore
// these fields, for the same reasons as given above for pointers.
if f.Type.Kind() == reflect.Interface && v.IsNil() {
return false
}
_, ok := mustInclude[f.Name]
return ok || !isEmptyValue(v)
}
// isEmptyValue reports whether v is the empty value for its type. This
// implementation is based on that of the encoding/json package, but its
// correctness does not depend on it being identical. What's important is that
// this function return false in situations where v should not be sent as part
// of a PATCH operation.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
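A hypothetical illustration of the selection rules above. The Item type is invented for the example; its zero-valued Count field is dropped by default and only emitted when named in forceSendFields:

package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
)

// Item mirrors the shape of generator output: every field carries a json tag with omitempty.
type Item struct {
	Name  string `json:"name,omitempty"`
	Count int64  `json:"count,omitempty"`
}

func main() {
	item := Item{Name: "example", Count: 0}

	b, _ := gensupport.MarshalJSON(item, nil)
	fmt.Println(string(b)) // {"name":"example"}: the zero Count is omitted

	b, _ = gensupport.MarshalJSON(item, []string{"Count"})
	fmt.Println(string(b)) // {"count":0,"name":"example"}: Count is forced into the output
}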

vendor/google.golang.org/api/gensupport/media.go generated vendored Normal file

@ -0,0 +1,200 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
"google.golang.org/api/googleapi"
)
const sniffBuffSize = 512
func newContentSniffer(r io.Reader) *contentSniffer {
return &contentSniffer{r: r}
}
// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
type contentSniffer struct {
r io.Reader
start []byte // buffer for the sniffed bytes.
err error // set to any error encountered while reading bytes to be sniffed.
ctype string // set on first sniff.
sniffed bool // set to true on first sniff.
}
func (cs *contentSniffer) Read(p []byte) (n int, err error) {
// Ensure that the content type is sniffed before any data is consumed from Reader.
_, _ = cs.ContentType()
if len(cs.start) > 0 {
n := copy(p, cs.start)
cs.start = cs.start[n:]
return n, nil
}
// We may have read some bytes into start while sniffing, even if the read ended in an error.
// We should first return those bytes, then the error.
if cs.err != nil {
return 0, cs.err
}
// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
return cs.r.Read(p)
}
// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
func (cs *contentSniffer) ContentType() (string, bool) {
if cs.sniffed {
return cs.ctype, cs.ctype != ""
}
cs.sniffed = true
// If ReadAll hits EOF, it returns err==nil.
cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
// Don't try to detect the content type based on possibly incomplete data.
if cs.err != nil {
return "", false
}
cs.ctype = http.DetectContentType(cs.start)
return cs.ctype, true
}
// DetermineContentType determines the content type of the supplied reader.
// If the content type is already known, it can be specified via ctype.
// Otherwise, the content of media will be sniffed to determine the content type.
// If media implements googleapi.ContentTyper (deprecated), this will be used
// instead of sniffing the content.
// After calling DetermineContentType the caller must not perform further reads on
// media, but rather read from the Reader that is returned.
func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
// Note: callers could avoid calling DetectContentType if ctype != "",
// but doing the check inside this function reduces the amount of
// generated code.
if ctype != "" {
return media, ctype
}
// For backwards compatibility, allow clients to set content
// type by providing a ContentTyper for media.
if typer, ok := media.(googleapi.ContentTyper); ok {
return media, typer.ContentType()
}
sniffer := newContentSniffer(media)
if ctype, ok := sniffer.ContentType(); ok {
return sniffer, ctype
}
// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
return sniffer, ""
}
type typeReader struct {
io.Reader
typ string
}
// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
type multipartReader struct {
pr *io.PipeReader
pipeOpen bool
ctype string
}
func newMultipartReader(parts []typeReader) *multipartReader {
mp := &multipartReader{pipeOpen: true}
var pw *io.PipeWriter
mp.pr, pw = io.Pipe()
mpw := multipart.NewWriter(pw)
mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
go func() {
for _, part := range parts {
w, err := mpw.CreatePart(typeHeader(part.typ))
if err != nil {
mpw.Close()
pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
return
}
_, err = io.Copy(w, part.Reader)
if err != nil {
mpw.Close()
pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
return
}
}
mpw.Close()
pw.Close()
}()
return mp
}
func (mp *multipartReader) Read(data []byte) (n int, err error) {
return mp.pr.Read(data)
}
func (mp *multipartReader) Close() error {
if !mp.pipeOpen {
return nil
}
mp.pipeOpen = false
return mp.pr.Close()
}
// CombineBodyMedia combines a JSON body with media content to create a multipart/related HTTP body.
// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with a random boundary.
//
// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
mp := newMultipartReader([]typeReader{
{body, bodyContentType},
{media, mediaContentType},
})
return mp, mp.ctype
}
func typeHeader(contentType string) textproto.MIMEHeader {
h := make(textproto.MIMEHeader)
if contentType != "" {
h.Set("Content-Type", contentType)
}
return h
}
// PrepareUpload determines whether the data in the supplied reader should be
// uploaded in a single request, or in sequential chunks.
// chunkSize is the size of the chunk that media should be split into.
// If chunkSize is non-zero and the contents of media do not fit in a single
// chunk (or there is an error reading media), then media will be returned as a
// ResumableBuffer. Otherwise, media will be returned as a Reader.
//
// After PrepareUpload has been called, media should no longer be used: the
// media content should be accessed via one of the return values.
func PrepareUpload(media io.Reader, chunkSize int) (io.Reader,
*ResumableBuffer) {
if chunkSize == 0 { // do not chunk
return media, nil
}
rb := NewResumableBuffer(media, chunkSize)
rdr, _, _, err := rb.Chunk()
if err == io.EOF { // we can upload this in a single request
return rdr, nil
}
// err might be a non-EOF error. If it is, the next call to rb.Chunk will
// return the same error. Returning a ResumableBuffer ensures that this error
// will be handled at some point.
return nil, rb
}
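As a small illustration of DetermineContentType (the HTML snippet is made up), the sniffed bytes are replayed by the returned reader, so the media content is preserved:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"google.golang.org/api/gensupport"
)

func main() {
	media := strings.NewReader("<html><body>hello</body></html>")
	r, ctype := gensupport.DetermineContentType(media, "")
	fmt.Println("sniffed content type:", ctype) // text/html; charset=utf-8

	body, _ := ioutil.ReadAll(r) // read from r, not from media, after sniffing
	fmt.Println("media:", string(body))
}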

vendor/google.golang.org/api/gensupport/params.go generated vendored Normal file

@ -0,0 +1,50 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"net/url"
"google.golang.org/api/googleapi"
)
// URLParams is a simplified replacement for url.Values
// that safely builds up URL parameters for encoding.
type URLParams map[string][]string
// Get returns the first value for the given key, or "".
func (u URLParams) Get(key string) string {
vs := u[key]
if len(vs) == 0 {
return ""
}
return vs[0]
}
// Set sets the key to value.
// It replaces any existing values.
func (u URLParams) Set(key, value string) {
u[key] = []string{value}
}
// SetMulti sets the key to an array of values.
// It replaces any existing values.
// Note that values must not be modified after calling SetMulti
// so the caller is responsible for making a copy if necessary.
func (u URLParams) SetMulti(key string, values []string) {
u[key] = values
}
// Encode encodes the values into ``URL encoded'' form
// ("bar=baz&foo=quux") sorted by key.
func (u URLParams) Encode() string {
return url.Values(u).Encode()
}
func SetOptions(u URLParams, opts ...googleapi.CallOption) {
for _, o := range opts {
u.Set(o.Get())
}
}
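A brief sketch of how the generated clients typically build a query string with URLParams and fold in CallOptions (the parameter values here are illustrative):

package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
	"google.golang.org/api/googleapi"
)

func main() {
	params := make(gensupport.URLParams)
	params.Set("alt", "json")
	params.SetMulti("prettyPrint", []string{"false"})
	gensupport.SetOptions(params, googleapi.QuotaUser("example-user"))

	fmt.Println(params.Encode()) // alt=json&prettyPrint=false&quotaUser=example-user
}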

vendor/google.golang.org/api/gensupport/resumable.go generated vendored Normal file

@ -0,0 +1,198 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"fmt"
"io"
"net/http"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader
// when the transfer is not yet complete.
statusResumeIncomplete = 308
// statusTooManyRequests is returned by the storage API if the
// per-project limits have been temporarily exceeded. The request
// should be retried.
// https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
statusTooManyRequests = 429
)
// ResumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type ResumableUpload struct {
Client *http.Client
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
UserAgent string // User-Agent for header of the request
// Media is the object being uploaded.
Media *ResumableBuffer
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
mu sync.Mutex // guards progress
progress int64 // number of bytes uploaded so far
// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
Callback func(int64)
// If not specified, a default exponential backoff strategy will be used.
Backoff BackoffStrategy
}
// Progress returns the number of bytes uploaded at this point.
func (rx *ResumableUpload) Progress() int64 {
rx.mu.Lock()
defer rx.mu.Unlock()
return rx.progress
}
// doUploadRequest performs a single HTTP request to upload data.
// off specifies the offset in rx.Media from which data is drawn.
// size is the number of bytes in data.
// final specifies whether data is the final chunk to be uploaded.
func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) {
req, err := http.NewRequest("POST", rx.URI, data)
if err != nil {
return nil, err
}
req.ContentLength = size
var contentRange string
if final {
if size == 0 {
contentRange = fmt.Sprintf("bytes */%v", off)
} else {
contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
}
} else {
contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
}
req.Header.Set("Content-Range", contentRange)
req.Header.Set("Content-Type", rx.MediaType)
req.Header.Set("User-Agent", rx.UserAgent)
return ctxhttp.Do(ctx, rx.Client, req)
}
// reportProgress calls a user-supplied callback to report upload progress.
// If old==updated, the callback is not called.
func (rx *ResumableUpload) reportProgress(old, updated int64) {
if updated-old == 0 {
return
}
rx.mu.Lock()
rx.progress = updated
rx.mu.Unlock()
if rx.Callback != nil {
rx.Callback(updated)
}
}
// transferChunk performs a single HTTP request to upload a single chunk from rx.Media.
func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) {
chunk, off, size, err := rx.Media.Chunk()
done := err == io.EOF
if !done && err != nil {
return nil, err
}
res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done)
if err != nil {
return res, err
}
if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK {
rx.reportProgress(off, off+int64(size))
}
if res.StatusCode == statusResumeIncomplete {
rx.Media.Next()
}
return res, nil
}
func contextDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
// Upload starts the process of a resumable upload with a cancellable context.
// It retries using the provided backoff strategy until cancelled or the
// strategy indicates to stop retrying.
// It is called from the auto-generated API code and is not visible to the user.
// rx is private to the auto-generated API code.
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
var pause time.Duration
backoff := rx.Backoff
if backoff == nil {
backoff = DefaultBackoffStrategy()
}
for {
// Ensure that we return in the case of cancelled context, even if pause is 0.
if contextDone(ctx) {
return nil, ctx.Err()
}
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(pause):
}
resp, err = rx.transferChunk(ctx)
var status int
if resp != nil {
status = resp.StatusCode
}
// Check if we should retry the request.
if shouldRetry(status, err) {
var retry bool
pause, retry = backoff.Pause()
if retry {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
continue
}
}
// If the chunk was uploaded successfully, but there's still
// more to go, upload the next chunk without any delay.
if status == statusResumeIncomplete {
pause = 0
backoff.Reset()
resp.Body.Close()
continue
}
// It's possible for err and resp to both be non-nil here, but we expose a simpler
// contract to our callers: exactly one of resp and err will be non-nil. This means
// that any response body must be closed here before returning a non-nil error.
if err != nil {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
return nil, err
}
return resp, nil
}
}
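Although ResumableUpload is driven by the generated API code rather than by developers, a hypothetical wiring sketch may help. The session URI is a placeholder (a real one is returned by the server after an initial request with uploadType=resumable), and the other values are assumptions:

package main

import (
	"fmt"
	"net/http"
	"strings"

	"golang.org/x/net/context"
	"google.golang.org/api/gensupport"
)

func main() {
	rx := &gensupport.ResumableUpload{
		Client:    http.DefaultClient,
		URI:       "https://www.googleapis.com/upload/example/v1/files?uploadType=resumable&upload_id=PLACEHOLDER",
		UserAgent: "example-agent/0.1",
		Media:     gensupport.NewResumableBuffer(strings.NewReader("payload bytes"), 256*1024),
		MediaType: "text/plain",
		Callback:  func(n int64) { fmt.Println("uploaded bytes so far:", n) },
		// Backoff left nil: Upload falls back to DefaultBackoffStrategy.
	}

	resp, err := rx.Upload(context.Background())
	if err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("upload status:", resp.Status)
}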

vendor/google.golang.org/api/gensupport/retry.go generated vendored Normal file

@ -0,0 +1,77 @@
package gensupport
import (
"io"
"net"
"net/http"
"time"
"golang.org/x/net/context"
)
// Retry invokes the given function, retrying it multiple times if the connection failed or
// the HTTP status response indicates the request should be attempted again. ctx may be nil.
func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
for {
resp, err := f()
var status int
if resp != nil {
status = resp.StatusCode
}
// Return if we shouldn't retry.
pause, retry := backoff.Pause()
if !shouldRetry(status, err) || !retry {
return resp, err
}
// Ensure the response body is closed, if any.
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
// Pause, but still listen to ctx.Done if context is not nil.
var done <-chan struct{}
if ctx != nil {
done = ctx.Done()
}
select {
case <-done:
return nil, ctx.Err()
case <-time.After(pause):
}
}
}
// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
func DefaultBackoffStrategy() BackoffStrategy {
return &ExponentialBackoff{
Base: 250 * time.Millisecond,
Max: 16 * time.Second,
}
}
// shouldRetry returns true if the HTTP response / error indicates that the
// request should be attempted again.
func shouldRetry(status int, err error) bool {
// Retry for 5xx response codes.
if 500 <= status && status < 600 {
return true
}
// Retry on statusTooManyRequests.
if status == statusTooManyRequests {
return true
}
// Retry on unexpected EOFs and temporary network errors.
if err == io.ErrUnexpectedEOF {
return true
}
if err, ok := err.(net.Error); ok {
return err.Temporary()
}
return false
}
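An illustrative use of Retry: wrap a plain GET so that 5xx/429 responses and transient network errors are retried with the default backoff (the URL is only an example):

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/api/gensupport"
)

func main() {
	resp, err := gensupport.Retry(context.Background(), func() (*http.Response, error) {
		return http.Get("https://www.googleapis.com/discovery/v1/apis")
	}, gensupport.DefaultBackoffStrategy())
	if err != nil {
		fmt.Println("request failed after retries:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}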


@ -9,21 +9,13 @@ package googleapi
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
"net/url"
"regexp"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/api/googleapi/internal/uritemplates"
)
@ -42,17 +34,28 @@ type SizeReaderAt interface {
Size() int64
}
// ServerResponse is embedded in each Do response and
// provides the HTTP status code and header sent by the server.
type ServerResponse struct {
// HTTPStatusCode is the server's response status code.
// When using a resource method's Do call, this will always be in the 2xx range.
HTTPStatusCode int
// Header contains the response header fields from the server.
Header http.Header
}
const (
Version = "0.5"
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
// UserAgent is the header string used to identify this package.
UserAgent = "google-api-go-client/" + Version
// uploadPause determines the delay between failed upload attempts
uploadPause = 1 * time.Second
// The default chunk size to use for resumable uploads if not specified by the user.
DefaultUploadChunkSize = 8 * 1024 * 1024
// The minimum chunk size that can be used for resumable uploads. All
// user-specified chunk sizes must be a multiple of this value.
MinUploadChunkSize = 256 * 1024
)
// Error contains an error response from the server.
@ -65,6 +68,8 @@ type Error struct {
// Body is the raw response returned by the server.
// It is often but not always JSON, depending on how the request fails.
Body string
// Header contains the response header fields from the server.
Header http.Header
Errors []ErrorItem
}
@ -122,6 +127,34 @@ func CheckResponse(res *http.Response) error {
return jerr.Error
}
}
return &Error{
Code: res.StatusCode,
Body: string(slurp),
Header: res.Header,
}
}
// IsNotModified reports whether err is the result of the
// server replying with http.StatusNotModified.
// Such error values are sometimes returned by "Do" methods
// on calls when If-None-Match is used.
func IsNotModified(err error) bool {
if err == nil {
return false
}
ae, ok := err.(*Error)
return ok && ae.Code == http.StatusNotModified
}
// CheckMediaResponse returns an error (of type *Error) if the response
// status code is not 2xx. Unlike CheckResponse it does not assume the
// body is a JSON error document.
func CheckMediaResponse(res *http.Response) error {
if res.StatusCode >= 200 && res.StatusCode <= 299 {
return nil
}
slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
res.Body.Close()
return &Error{
Code: res.StatusCode,
Body: string(slurp),
@ -148,52 +181,6 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
return buf, nil
}
func getMediaType(media io.Reader) (io.Reader, string) {
if typer, ok := media.(ContentTyper); ok {
return media, typer.ContentType()
}
pr, pw := io.Pipe()
typ := "application/octet-stream"
buf, err := ioutil.ReadAll(io.LimitReader(media, 512))
if err != nil {
pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
return pr, typ
}
typ = http.DetectContentType(buf)
mr := io.MultiReader(bytes.NewReader(buf), media)
go func() {
_, err = io.Copy(pw, mr)
if err != nil {
pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
return
}
pw.Close()
}()
return pr, typ
}
// DetectMediaType detects and returns the content type of the provided media.
// If the type can not be determined, "application/octet-stream" is returned.
func DetectMediaType(media io.ReaderAt) string {
if typer, ok := media.(ContentTyper); ok {
return typer.ContentType()
}
typ := "application/octet-stream"
buf := make([]byte, 1024)
n, err := media.ReadAt(buf, 0)
buf = buf[:n]
if err == nil || err == io.EOF {
typ = http.DetectContentType(buf)
}
return typ
}
type Lengther interface {
Len() int
}
// endingWithErrorReader reads from r until it returns an error. If the
// final error from r is io.EOF and e is non-nil, e is used instead.
type endingWithErrorReader struct {
@ -209,12 +196,6 @@ func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
return
}
func typeHeader(contentType string) textproto.MIMEHeader {
h := make(textproto.MIMEHeader)
h.Set("Content-Type", contentType)
return h
}
// countingWriter counts the number of bytes it receives to write, but
// discards them.
type countingWriter struct {
@ -226,203 +207,65 @@ func (w countingWriter) Write(p []byte) (int, error) {
return len(p), nil
}
// ConditionallyIncludeMedia does nothing if media is nil.
//
// bodyp is an in/out parameter. It should initially point to the
// reader of the application/json (or whatever) payload to send in the
// API request. It's updated to point to the multipart body reader.
//
// ctypep is an in/out parameter. It should initially point to the
// content type of the bodyp, usually "application/json". It's updated
// to the "multipart/related" content type, with random boundary.
//
// The return value is the content-length of the entire multipart body.
func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) {
if media == nil {
return
}
// Get the media type, which might return a different reader instance.
var mediaType string
media, mediaType = getMediaType(media)
body, bodyType := *bodyp, *ctypep
pr, pw := io.Pipe()
mpw := multipart.NewWriter(pw)
*bodyp = pr
*ctypep = "multipart/related; boundary=" + mpw.Boundary()
go func() {
w, err := mpw.CreatePart(typeHeader(bodyType))
if err != nil {
mpw.Close()
pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err))
return
}
_, err = io.Copy(w, body)
if err != nil {
mpw.Close()
pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err))
return
}
w, err = mpw.CreatePart(typeHeader(mediaType))
if err != nil {
mpw.Close()
pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err))
return
}
_, err = io.Copy(w, media)
if err != nil {
mpw.Close()
pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err))
return
}
mpw.Close()
pw.Close()
}()
cancel = func() { pw.CloseWithError(errAborted) }
return cancel, true
}
var errAborted = errors.New("googleapi: upload aborted")
// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
type ProgressUpdater func(current, total int64)
// ResumableUpload is used by the generated APIs to provide resumable uploads.
type MediaOption interface {
setOptions(o *MediaOptions)
}
type contentTypeOption string
func (ct contentTypeOption) setOptions(o *MediaOptions) {
o.ContentType = string(ct)
if o.ContentType == "" {
o.ForceEmptyContentType = true
}
}
// ContentType returns a MediaOption which sets the Content-Type header for media uploads.
// If ctype is empty, the Content-Type header will be omitted.
func ContentType(ctype string) MediaOption {
return contentTypeOption(ctype)
}
type chunkSizeOption int
func (cs chunkSizeOption) setOptions(o *MediaOptions) {
size := int(cs)
if size%MinUploadChunkSize != 0 {
size += MinUploadChunkSize - (size % MinUploadChunkSize)
}
o.ChunkSize = size
}
// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
// size will be rounded up to the nearest multiple of 256K.
// Media which contains fewer than size bytes will be uploaded in a single request.
// Media which contains size bytes or more will be uploaded in separate chunks.
// If size is zero, media will be uploaded in a single request.
func ChunkSize(size int) MediaOption {
return chunkSizeOption(size)
}
// MediaOptions stores options for customizing media upload. It is not used by developers directly.
type MediaOptions struct {
ContentType string
ForceEmptyContentType bool
ChunkSize int
}
// ProcessMediaOptions stores options from opts in a MediaOptions.
// It is not used by developers directly.
type ResumableUpload struct {
Client *http.Client
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
UserAgent string // User-Agent for header of the request
// Media is the object being uploaded.
Media io.ReaderAt
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
mu sync.Mutex // guards progress
progress int64 // number of bytes uploaded so far
started bool // whether the upload has been started
// Callback is an optional function that will be called upon every progress update.
Callback ProgressUpdater
}
var (
// rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded.
rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`)
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
chunkSize int64 = 1 << 18
)
// Progress returns the number of bytes uploaded at this point.
func (rx *ResumableUpload) Progress() int64 {
rx.mu.Lock()
defer rx.mu.Unlock()
return rx.progress
}
func (rx *ResumableUpload) transferStatus() (int64, *http.Response, error) {
req, _ := http.NewRequest("POST", rx.URI, nil)
req.ContentLength = 0
req.Header.Set("User-Agent", rx.UserAgent)
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
res, err := rx.Client.Do(req)
if err != nil || res.StatusCode != statusResumeIncomplete {
return 0, res, err
func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
for _, o := range opts {
o.setOptions(mo)
}
var start int64
if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err != nil {
return 0, nil, fmt.Errorf("unable to parse range size %v", m[1])
}
start += 1 // Start at the next byte
}
return start, res, nil
}
type chunk struct {
body io.Reader
size int64
err error
}
func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) {
var start int64
var err error
res := &http.Response{}
if rx.started {
start, res, err = rx.transferStatus()
if err != nil || res.StatusCode != statusResumeIncomplete {
return res, err
}
}
rx.started = true
for {
select { // Check for cancellation
case <-ctx.Done():
res.StatusCode = http.StatusRequestTimeout
return res, ctx.Err()
default:
}
reqSize := rx.ContentLength - start
if reqSize > chunkSize {
reqSize = chunkSize
}
r := io.NewSectionReader(rx.Media, start, reqSize)
req, _ := http.NewRequest("POST", rx.URI, r)
req.ContentLength = reqSize
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
req.Header.Set("Content-Type", rx.MediaType)
req.Header.Set("User-Agent", rx.UserAgent)
res, err = rx.Client.Do(req)
start += reqSize
if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) {
rx.mu.Lock()
rx.progress = start // keep track of number of bytes sent so far
rx.mu.Unlock()
if rx.Callback != nil {
rx.Callback(start, rx.ContentLength)
}
}
if err != nil || res.StatusCode != statusResumeIncomplete {
break
}
}
return res, err
}
var sleep = time.Sleep // override in unit tests
// Upload starts the process of a resumable upload with a cancellable context.
// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled.
// It is called from the auto-generated API code and is not visible to the user.
// rx is private to the auto-generated API code.
func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {
var res *http.Response
var err error
for {
res, err = rx.transferChunks(ctx)
if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return res, err
}
select { // Check for cancellation
case <-ctx.Done():
res.StatusCode = http.StatusRequestTimeout
return res, ctx.Err()
default:
}
sleep(uploadPause)
}
return res, err
return mo
}
func ResolveRelative(basestr, relstr string) string {
@ -549,3 +392,33 @@ func CombineFields(s []Field) string {
}
return strings.Join(r, ",")
}
// A CallOption is an optional argument to an API call.
// It should be treated as an opaque value by users of Google APIs.
//
// A CallOption is something that configures an API call in a way that is
// not specific to that API; for instance, controlling the quota user for
// an API call is common across many APIs, and is thus a CallOption.
type CallOption interface {
Get() (key, value string)
}
// QuotaUser returns a CallOption that will set the quota user for a call.
// The quota user can be used by server-side applications to control accounting.
// It can be an arbitrary string up to 40 characters, and will override UserIP
// if both are provided.
func QuotaUser(u string) CallOption { return quotaUser(u) }
type quotaUser string
func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) }
// UserIP returns a CallOption that will set the "userIp" parameter of a call.
// This should be the IP address of the originating request.
func UserIP(ip string) CallOption { return userIP(ip) }
type userIP string
func (i userIP) Get() (string, string) { return "userIp", string(i) }
// TODO: Fields too
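To see how the media options above compose (the option values are made up), ProcessMediaOptions applies the default chunk size and rounds a requested size up to a multiple of MinUploadChunkSize:

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	opts := []googleapi.MediaOption{
		googleapi.ContentType("image/jpeg"),
		googleapi.ChunkSize(1000), // rounded up to the next multiple of 256 KiB
	}
	mo := googleapi.ProcessMediaOptions(opts)
	fmt.Println(mo.ContentType)                               // image/jpeg
	fmt.Println(mo.ChunkSize == googleapi.MinUploadChunkSize) // true
}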


@ -2,26 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uritemplates is a level 4 implementation of RFC 6570 (URI
// Package uritemplates is a level 3 implementation of RFC 6570 (URI
// Template, http://tools.ietf.org/html/rfc6570).
//
// To use uritemplates, parse a template string and expand it with a value
// map:
//
// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
// values := make(map[string]interface{})
// values["user"] = "jtacoma"
// values["repo"] = "uritemplates"
// expanded, _ := template.ExpandString(values)
// fmt.Printf(expanded)
//
// uritemplates does not support composite values (in Go: slices or maps)
// and so does not qualify as a level 4 implementation.
package uritemplates
import (
"bytes"
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
@ -45,52 +34,47 @@ func pctEncode(src []byte) []byte {
return dst
}
func escape(s string, allowReserved bool) (escaped string) {
func escape(s string, allowReserved bool) string {
if allowReserved {
escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
} else {
escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
return string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
}
return escaped
return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
}
// A UriTemplate is a parsed representation of a URI template.
type UriTemplate struct {
// A uriTemplate is a parsed representation of a URI template.
type uriTemplate struct {
raw string
parts []templatePart
}
// Parse parses a URI template string into a UriTemplate object.
func Parse(rawtemplate string) (template *UriTemplate, err error) {
template = new(UriTemplate)
template.raw = rawtemplate
split := strings.Split(rawtemplate, "{")
template.parts = make([]templatePart, len(split)*2-1)
// parse parses a URI template string into a uriTemplate object.
func parse(rawTemplate string) (*uriTemplate, error) {
split := strings.Split(rawTemplate, "{")
parts := make([]templatePart, len(split)*2-1)
for i, s := range split {
if i == 0 {
if strings.Contains(s, "}") {
err = errors.New("unexpected }")
break
return nil, errors.New("unexpected }")
}
template.parts[i].raw = s
} else {
subsplit := strings.Split(s, "}")
if len(subsplit) != 2 {
err = errors.New("malformed template")
break
}
expression := subsplit[0]
template.parts[i*2-1], err = parseExpression(expression)
if err != nil {
break
}
template.parts[i*2].raw = subsplit[1]
parts[i].raw = s
continue
}
subsplit := strings.Split(s, "}")
if len(subsplit) != 2 {
return nil, errors.New("malformed template")
}
expression := subsplit[0]
var err error
parts[i*2-1], err = parseExpression(expression)
if err != nil {
return nil, err
}
parts[i*2].raw = subsplit[1]
}
if err != nil {
template = nil
}
return template, err
return &uriTemplate{
raw: rawTemplate,
parts: parts,
}, nil
}
type templatePart struct {
@ -160,6 +144,8 @@ func parseExpression(expression string) (result templatePart, err error) {
}
func parseTerm(term string) (result templateTerm, err error) {
// TODO(djd): Remove "*" suffix parsing once we check that no APIs have
// mistakenly used that attribute.
if strings.HasSuffix(term, "*") {
result.explode = true
term = term[:len(term)-1]
@ -185,175 +171,50 @@ func parseTerm(term string) (result templateTerm, err error) {
}
// Expand expands a URI template with a set of values to produce a string.
func (self *UriTemplate) Expand(value interface{}) (string, error) {
values, ismap := value.(map[string]interface{})
if !ismap {
if m, ismap := struct2map(value); !ismap {
return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
} else {
return self.Expand(m)
}
}
func (t *uriTemplate) Expand(values map[string]string) string {
var buf bytes.Buffer
for _, p := range self.parts {
err := p.expand(&buf, values)
if err != nil {
return "", err
}
for _, p := range t.parts {
p.expand(&buf, values)
}
return buf.String(), nil
return buf.String()
}
func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
if len(self.raw) > 0 {
buf.WriteString(self.raw)
return nil
func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) {
if len(tp.raw) > 0 {
buf.WriteString(tp.raw)
return
}
var zeroLen = buf.Len()
buf.WriteString(self.first)
var firstLen = buf.Len()
for _, term := range self.terms {
var first = true
for _, term := range tp.terms {
value, exists := values[term.name]
if !exists {
continue
}
if buf.Len() != firstLen {
buf.WriteString(self.sep)
}
switch v := value.(type) {
case string:
self.expandString(buf, term, v)
case []interface{}:
self.expandArray(buf, term, v)
case map[string]interface{}:
if term.truncate > 0 {
return errors.New("cannot truncate a map expansion")
}
self.expandMap(buf, term, v)
default:
if m, ismap := struct2map(value); ismap {
if term.truncate > 0 {
return errors.New("cannot truncate a map expansion")
}
self.expandMap(buf, term, m)
} else {
str := fmt.Sprintf("%v", value)
self.expandString(buf, term, str)
}
if first {
buf.WriteString(tp.first)
first = false
} else {
buf.WriteString(tp.sep)
}
tp.expandString(buf, term, value)
}
if buf.Len() == firstLen {
original := buf.Bytes()[:zeroLen]
buf.Reset()
buf.Write(original)
}
return nil
}
func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
if self.named {
func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
if tp.named {
buf.WriteString(name)
if empty {
buf.WriteString(self.ifemp)
buf.WriteString(tp.ifemp)
} else {
buf.WriteString("=")
}
}
}
func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
if len(s) > t.truncate && t.truncate > 0 {
s = s[:t.truncate]
}
self.expandName(buf, t.name, len(s) == 0)
buf.WriteString(escape(s, self.allowReserved))
}
func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
if len(a) == 0 {
return
} else if !t.explode {
self.expandName(buf, t.name, false)
}
for i, value := range a {
if t.explode && i > 0 {
buf.WriteString(self.sep)
} else if i > 0 {
buf.WriteString(",")
}
var s string
switch v := value.(type) {
case string:
s = v
default:
s = fmt.Sprintf("%v", v)
}
if len(s) > t.truncate && t.truncate > 0 {
s = s[:t.truncate]
}
if self.named && t.explode {
self.expandName(buf, t.name, len(s) == 0)
}
buf.WriteString(escape(s, self.allowReserved))
}
}
func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
if len(m) == 0 {
return
}
if !t.explode {
self.expandName(buf, t.name, len(m) == 0)
}
var firstLen = buf.Len()
for k, value := range m {
if firstLen != buf.Len() {
if t.explode {
buf.WriteString(self.sep)
} else {
buf.WriteString(",")
}
}
var s string
switch v := value.(type) {
case string:
s = v
default:
s = fmt.Sprintf("%v", v)
}
if t.explode {
buf.WriteString(escape(k, self.allowReserved))
buf.WriteRune('=')
buf.WriteString(escape(s, self.allowReserved))
} else {
buf.WriteString(escape(k, self.allowReserved))
buf.WriteRune(',')
buf.WriteString(escape(s, self.allowReserved))
}
}
}
func struct2map(v interface{}) (map[string]interface{}, bool) {
value := reflect.ValueOf(v)
switch value.Type().Kind() {
case reflect.Ptr:
return struct2map(value.Elem().Interface())
case reflect.Struct:
m := make(map[string]interface{})
for i := 0; i < value.NumField(); i++ {
tag := value.Type().Field(i).Tag
var name string
if strings.Contains(string(tag), ":") {
name = tag.Get("uri")
} else {
name = strings.TrimSpace(string(tag))
}
if len(name) == 0 {
name = value.Type().Field(i).Name
}
m[name] = value.Field(i).Interface()
}
return m, true
}
return nil, false
tp.expandName(buf, t.name, len(s) == 0)
buf.WriteString(escape(s, tp.allowReserved))
}


@ -1,13 +1,13 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uritemplates
func Expand(path string, expansions map[string]string) (string, error) {
template, err := Parse(path)
func Expand(path string, values map[string]string) (string, error) {
template, err := parse(path)
if err != nil {
return "", err
}
values := make(map[string]interface{})
for k, v := range expansions {
values[k] = v
}
return template.Expand(values)
return template.Expand(values), nil
}
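A sketch of the simplified, string-only expansion this package now performs. Since the package is internal it cannot be imported from outside google.golang.org/api, so the snippet is written as if it lived alongside the package; the template and values are illustrative:

package uritemplates

import "fmt"

func exampleExpand() {
	expanded, err := Expand("https://www.googleapis.com/storage/v1/b/{bucket}/o/{object}", map[string]string{
		"bucket": "my-bucket",
		"object": "photos/2016 spring.jpg",
	})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// Simple expansion percent-encodes reserved characters such as '/' and ' ':
	// .../b/my-bucket/o/photos%2F2016%20spring.jpg
	fmt.Println(expanded)
}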


@ -148,3 +148,35 @@ func (s Float64s) MarshalJSON() ([]byte, error) {
return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
})
}
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool { return &v }
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 { return &v }
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 { return &v }
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 { return &v }
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string { return &v }
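These helpers exist so that optional, pointer-typed fields can carry an explicit zero value through encoding/json's omitempty; a short hypothetical illustration (the Settings type is invented):

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

// Settings is an invented struct with optional, pointer-typed fields.
type Settings struct {
	Enabled  *bool   `json:"enabled,omitempty"`
	MaxItems *int64  `json:"maxItems,omitempty"`
	Label    *string `json:"label,omitempty"`
}

func main() {
	s := Settings{
		Enabled:  googleapi.Bool(false), // explicit false survives omitempty because the pointer is non-nil
		MaxItems: googleapi.Int64(0),
		Label:    googleapi.String("example"),
	}
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // {"enabled":false,"maxItems":0,"label":"example"}
}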


@ -1,13 +1,13 @@
{
"kind": "discovery#restDescription",
"etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/ALzt_o9hBNBIakQJmeXCNhSU8II\"",
"etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/KVPQfwGxQTBtH0g1kuij0C9i4uc\"",
"discoveryVersion": "v1",
"id": "storage:v1",
"name": "storage",
"version": "v1",
"revision": "20150630",
"title": "Cloud Storage API",
"description": "Lets you store and retrieve potentially-large, immutable data objects.",
"revision": "20160304",
"title": "Cloud Storage JSON API",
"description": "Stores and retrieves potentially large, immutable data objects.",
"ownerDomain": "google.com",
"ownerName": "Google",
"icons": {
@ -75,6 +75,9 @@
"https://www.googleapis.com/auth/cloud-platform": {
"description": "View and manage your data across Google Cloud Platform services"
},
"https://www.googleapis.com/auth/cloud-platform.read-only": {
"description": "View your data across Google Cloud Platform services"
},
"https://www.googleapis.com/auth/devstorage.full_control": {
"description": "Manage your data and permissions in Google Cloud Storage"
},
@ -271,7 +274,12 @@
},
"timeCreated": {
"type": "string",
"description": "Creation time of the bucket in RFC 3339 format.",
"description": "The creation time of the bucket in RFC 3339 format.",
"format": "date-time"
},
"updated": {
"type": "string",
"description": "The modification time of the bucket in RFC 3339 format.",
"format": "date-time"
},
"versioning": {
@ -566,18 +574,26 @@
},
"contentType": {
"type": "string",
"description": "Content-Type of the object data.",
"annotations": {
"required": [
"storage.objects.insert",
"storage.objects.update"
]
}
"description": "Content-Type of the object data."
},
"crc32c": {
"type": "string",
"description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices."
},
"customerEncryption": {
"type": "object",
"description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.",
"properties": {
"encryptionAlgorithm": {
"type": "string",
"description": "The encryption algorithm."
},
"keySha256": {
"type": "string",
"description": "SHA256 hash value of the encryption key."
}
}
},
"etag": {
"type": "string",
"description": "HTTP 1.1 Entity tag for the object."
@ -648,6 +664,11 @@
"type": "string",
"description": "Storage class of the object."
},
"timeCreated": {
"type": "string",
"description": "The creation time of the object in RFC 3339 format.",
"format": "date-time"
},
"timeDeleted": {
"type": "string",
"description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
@ -655,7 +676,7 @@
},
"updated": {
"type": "string",
"description": "The creation or modification time of the object in RFC 3339 format. For buckets with versioning enabled, changing an object's metadata does not change this property.",
"description": "The modification time of the object metadata in RFC 3339 format.",
"format": "date-time"
}
}
@ -842,6 +863,7 @@
"entity"
],
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -872,6 +894,7 @@
"$ref": "BucketAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -898,6 +921,7 @@
"$ref": "BucketAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -921,6 +945,7 @@
"$ref": "BucketAccessControls"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -954,6 +979,7 @@
"$ref": "BucketAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -987,6 +1013,7 @@
"$ref": "BucketAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
}
@ -1074,6 +1101,7 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"
@ -1211,6 +1239,7 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"
@ -1417,6 +1446,7 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"
@ -1450,6 +1480,7 @@
"entity"
],
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1480,6 +1511,7 @@
"$ref": "ObjectAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1506,6 +1538,7 @@
"$ref": "ObjectAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1541,6 +1574,7 @@
"$ref": "ObjectAccessControls"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1574,6 +1608,7 @@
"$ref": "ObjectAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1607,6 +1642,7 @@
"$ref": "ObjectAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
}
@ -1640,7 +1676,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -1651,6 +1687,7 @@
"entity"
],
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1680,7 +1717,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -1694,6 +1731,7 @@
"$ref": "ObjectAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1717,7 +1755,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -1733,6 +1771,7 @@
"$ref": "ObjectAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1756,7 +1795,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -1769,6 +1808,7 @@
"$ref": "ObjectAccessControls"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1798,7 +1838,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -1815,6 +1855,7 @@
"$ref": "ObjectAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
@ -1844,7 +1885,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -1861,6 +1902,7 @@
"$ref": "ObjectAccessControl"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.full_control"
]
}
@ -1882,7 +1924,7 @@
},
"destinationObject": {
"type": "string",
"description": "Name of the new object.",
"description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
},
@ -1935,7 +1977,8 @@
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_write"
],
"supportsMediaDownload": true
"supportsMediaDownload": true,
"useMediaDownloadService": true
},
"copy": {
"id": "storage.objects.copy",
@ -1945,7 +1988,7 @@
"parameters": {
"destinationBucket": {
"type": "string",
"description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
"description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
},
@ -2051,7 +2094,7 @@
},
"sourceObject": {
"type": "string",
"description": "Name of the source object.",
"description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -2073,7 +2116,8 @@
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_write"
],
"supportsMediaDownload": true
"supportsMediaDownload": true,
"useMediaDownloadService": true
},
"delete": {
"id": "storage.objects.delete",
@ -2119,7 +2163,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -2178,7 +2222,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
},
@ -2205,11 +2249,13 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"
],
"supportsMediaDownload": true
"supportsMediaDownload": true,
"useMediaDownloadService": true
},
"insert": {
"id": "storage.objects.insert",
@ -2254,7 +2300,7 @@
},
"name": {
"type": "string",
"description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
"description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"location": "query"
},
"predefinedAcl": {
@ -2307,6 +2353,7 @@
"https://www.googleapis.com/auth/devstorage.read_write"
],
"supportsMediaDownload": true,
"useMediaDownloadService": true,
"supportsMediaUpload": true,
"mediaUpload": {
"accept": [
@ -2385,6 +2432,7 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"
@ -2435,7 +2483,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
},
@ -2504,7 +2552,7 @@
},
"destinationObject": {
"type": "string",
"description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
"description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
},
@ -2615,7 +2663,7 @@
},
"sourceObject": {
"type": "string",
"description": "Name of the source object.",
"description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
}
@ -2682,7 +2730,7 @@
},
"object": {
"type": "string",
"description": "Name of the object.",
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
"required": true,
"location": "path"
},
@ -2736,7 +2784,8 @@
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_write"
],
"supportsMediaDownload": true
"supportsMediaDownload": true,
"useMediaDownloadService": true
},
"watchAll": {
"id": "storage.objects.watchAll",
@ -2803,6 +2852,7 @@
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/devstorage.full_control",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/devstorage.read_write"

File diff suppressed because it is too large