vendor: update all dependencies

Nick Craig-Wood 2019-02-09 12:50:35 +00:00
parent fb5ee22112
commit 43bc381e90
324 changed files with 37701 additions and 10005 deletions

View file

@@ -79,8 +79,13 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
 		ipRange: v.IPRange,

 		// Container/Blob-specific SAS parameters
-		resource:   resource,
-		identifier: v.Identifier,
+		resource:           resource,
+		identifier:         v.Identifier,
+		cacheControl:       v.CacheControl,
+		contentDisposition: v.ContentDisposition,
+		contentEncoding:    v.ContentEncoding,
+		contentLanguage:    v.ContentLanguage,
+		contentType:        v.ContentType,

 		// Calculated SAS signature
 		signature: signature,
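
The five new fields above let a service SAS carry response-header overrides, which the signing code emits as the rscc/rscd/rsce/rscl/rsct query parameters (see the SASQueryParameters changes later in this diff). A minimal sketch of the generation side, assuming this release's signature in which NewSASQueryParameters returns the parameters directly; the account URL, names, and header values are placeholders:

import (
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// blobSASWithHeaders signs a read-only SAS that overrides the response
// headers served with the blob. Hypothetical helper; the credential is
// assumed to come from azblob.NewSharedKeyCredential elsewhere.
func blobSASWithHeaders(credential *azblob.SharedKeyCredential, container, blob string) string {
	qp := azblob.BlobSASSignatureValues{
		Protocol:           azblob.SASProtocolHTTPS,
		ExpiryTime:         time.Now().UTC().Add(4 * time.Hour),
		ContainerName:      container,
		BlobName:           blob,
		Permissions:        azblob.BlobSASPermissions{Read: true}.String(),
		ContentDisposition: `attachment; filename="report.pdf"`, // emitted as rscd
		ContentType:        "application/pdf",                   // emitted as rsct
	}.NewSASQueryParameters(credential)
	return fmt.Sprintf("https://myaccount.blob.core.windows.net/%s/%s?%s", container, blob, qp.Encode())
}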

View file

@@ -229,7 +229,7 @@ func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedI
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
 func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
 	prefix, include, maxResults := o.pointers()
-	return c.client.ListBlobFlatSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
+	return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
 }

 // ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
@@ -242,7 +242,7 @@ func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Mark
 		return nil, errors.New("snapshots are not supported in this listing operation")
 	}
 	prefix, include, maxResults := o.pointers()
-	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
+	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
 }

 // ListBlobsSegmentOptions defines options available when calling ListBlobs.
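
The val to Val rename inside Marker (see the models change later in this diff) does not alter the calling convention: a paging loop still threads NextMarker through until NotDone reports the final segment. A minimal sketch, assuming an already-constructed ContainerURL:

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// listAllBlobs drains every segment of a flat listing. Hypothetical helper.
func listAllBlobs(ctx context.Context, containerURL azblob.ContainerURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		segment, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
		if err != nil {
			return err
		}
		marker = segment.NextMarker // opaque continuation token for the next round trip
		for _, blob := range segment.Segment.BlobItems {
			fmt.Println(blob.Name)
		}
	}
	return nil
}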

View file

@@ -80,7 +80,7 @@ func appendToURLPath(u url.URL, name string) url.URL {
 // https://docs.microsoft.com/rest/api/storageservices/list-containers2.
 func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
 	prefix, include, maxResults := o.pointers()
-	return s.client.ListContainersSegment(ctx, prefix, marker.val, maxResults, include, nil, nil)
+	return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
 }

 // ListContainersOptions defines options available when calling ListContainers.

View file

@@ -1,3 +1,3 @@
 package azblob

-const serviceLibVersion = "0.3"
+const serviceLibVersion = "0.5"

View file

@@ -109,8 +109,8 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
 	})
 }

-// redactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
-func redactSigQueryParam(rawQuery string) (bool, string) {
+// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
+func RedactSigQueryParam(rawQuery string) (bool, string) {
 	rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
 	sigFound := strings.Contains(rawQuery, "?sig=")
 	if !sigFound {
@@ -131,7 +131,7 @@ func redactSigQueryParam(rawQuery string) (bool, string) {

 func prepareRequestForLogging(request pipeline.Request) *http.Request {
 	req := request
-	if sigFound, rawQuery := redactSigQueryParam(req.URL.RawQuery); sigFound {
+	if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
 		// Make copy so we don't destroy the query parameters we actually need to send in the request
 		req = request.Copy()
 		req.Request.URL.RawQuery = rawQuery
@@ -161,7 +161,7 @@ func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
 		req = request.Copy()
 		url, err := url.Parse(req.Header.Get(key))
 		if err == nil {
-			if sigFound, rawQuery := redactSigQueryParam(url.RawQuery); sigFound {
+			if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
 				url.RawQuery = rawQuery
 				req.Header.Set(xMsCopySourceHeader, url.String())
 			}
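
Exporting this function lets code outside the package apply the same redaction before URLs reach its own logs. A minimal sketch; the helper name is hypothetical, and note (per the body above) that RedactSigQueryParam lower-cases the query it rewrites:

import (
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// safeURLForLogging scrubs the SAS signature before a URL is logged.
func safeURLForLogging(u url.URL) string {
	if sigFound, rawQuery := azblob.RedactSigQueryParam(u.RawQuery); sigFound {
		u.RawQuery = rawQuery // query comes back lower-cased with the sig value redacted
	}
	return u.String()
}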

View file

@@ -5,6 +5,8 @@ import (
 	"io"
 	"net"
 	"net/http"
+	"strings"
+	"sync"
 )

 const CountToEnd = 0
@@ -28,6 +30,9 @@ type HTTPGetterInfo struct {
 	ETag ETag
 }

+// FailedReadNotifier is a function type that represents the notification function called when a read fails
+type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
+
 // RetryReaderOptions contains properties which can help to decide when to do retry.
 type RetryReaderOptions struct {
 	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
@@ -36,6 +41,20 @@ type RetryReaderOptions struct {
 	MaxRetryRequests   int
 	doInjectError      bool
 	doInjectErrorRound int
+
+	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
+	NotifyFailedRead FailedReadNotifier
+
+	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
+	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
+	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
+	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
+	// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
+	// treated as a fatal (non-retryable) error.
+	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
+	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
+	// which will be retried.
+	TreatEarlyCloseAsError bool
 }

 // retryReader implements io.ReadCloser methods.
@@ -45,17 +64,33 @@ type RetryReaderOptions struct {
 // through reading from the new response.
 type retryReader struct {
 	ctx             context.Context
-	response        *http.Response
 	info            HTTPGetterInfo
 	countWasBounded bool
 	o               RetryReaderOptions
 	getter          HTTPGetter
+
+	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
+	responseMu *sync.Mutex
+	response   *http.Response
 }

 // NewRetryReader creates a retry reader.
 func NewRetryReader(ctx context.Context, initialResponse *http.Response,
 	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
-	return &retryReader{ctx: ctx, getter: getter, info: info, countWasBounded: info.Count != CountToEnd, response: initialResponse, o: o}
+	return &retryReader{
+		ctx:             ctx,
+		getter:          getter,
+		info:            info,
+		countWasBounded: info.Count != CountToEnd,
+		response:        initialResponse,
+		responseMu:      &sync.Mutex{},
+		o:               o}
 }

+func (s *retryReader) setResponse(r *http.Response) {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+	s.response = r
+}
+
 func (s *retryReader) Read(p []byte) (n int, err error) {
@@ -66,15 +101,19 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
 			return 0, io.EOF
 		}
-		if s.response == nil { // We don't have a response stream to read from, try to get one.
-			response, err := s.getter(s.ctx, s.info)
+		s.responseMu.Lock()
+		resp := s.response
+		s.responseMu.Unlock()
+		if resp == nil { // We don't have a response stream to read from, try to get one.
+			newResponse, err := s.getter(s.ctx, s.info)
 			if err != nil {
 				return 0, err
 			}
 			// Successful GET; this is the network stream we'll read from.
-			s.response = response
+			s.setResponse(newResponse)
+			resp = newResponse
 		}

-		n, err := s.response.Body.Read(p) // Read from the stream
+		n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)

 		// Injection mechanism for testing.
 		if s.o.doInjectError && try == s.o.doInjectErrorRound {
@@ -89,23 +128,49 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
 			}
 			return n, err // Return the return to the caller
 		}

-		s.Close()        // Error, close stream
-		s.response = nil // Our stream is no longer good
+		s.Close()          // Error, close stream
+		s.setResponse(nil) // Our stream is no longer good

 		// Check the retry count and error code, and decide whether to retry.
-		if try >= s.o.MaxRetryRequests {
-			return n, err // All retries exhausted
+		retriesExhausted := try >= s.o.MaxRetryRequests
+		_, isNetError := err.(net.Error)
+		willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted
+
+		// Notify, for logging purposes, of any failures
+		if s.o.NotifyFailedRead != nil {
+			failureCount := try + 1 // because try is zero-based
+			s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
 		}
-		if _, ok := err.(net.Error); ok {
+
+		if willRetry {
 			continue
 			// Loop around and try to get and read from new stream.
 		}
-		return n, err // Not retryable, just return
+		return n, err // Not retryable, or retries exhausted, so just return
 	}
 }

+// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry.
+// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
+// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
+// which is exactly the behaviour we want.
+// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read)
+// then there are two different types of error that may happen - either the one we check for here,
+// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
+// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
+func (s *retryReader) wasRetryableEarlyClose(err error) bool {
+	if s.o.TreatEarlyCloseAsError {
+		return false // user wants all early closes to be errors, and so not retryable
+	}
+	// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
+	return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
+}
+
+const ReadOnClosedBodyMessage = "read on closed response body"
+
 func (s *retryReader) Close() error {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+
 	if s.response != nil && s.response.Body != nil {
 		return s.response.Body.Close()
 	}
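
Taken together, the two new options give callers observability (NotifyFailedRead) and control over the early-close behaviour (TreatEarlyCloseAsError). A minimal sketch of wiring the notifier to a logger, assuming downloadResponse came from a prior blobURL.Download call and that Body(RetryReaderOptions) is, as usual for this package, what wraps the response in a retryReader:

import (
	"io"
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// loggingBody returns the download body wrapped in a retrying reader that
// reports every failed read. Hypothetical helper.
func loggingBody(downloadResponse *azblob.DownloadResponse) io.ReadCloser {
	return downloadResponse.Body(azblob.RetryReaderOptions{
		MaxRetryRequests: 3, // up to 3 extra GETs to resume a broken stream
		NotifyFailedRead: func(failureCount int, lastError error, offset, count int64, willRetry bool) {
			log.Printf("read failure %d at offset=%d count=%d: %v (will retry: %v)",
				failureCount, offset, count, lastError, willRetry)
		},
		// TreatEarlyCloseAsError: true, // would make an early Close fatal instead of retryable
	})
}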

View file

@@ -47,17 +47,22 @@ const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 860
 // This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
 type SASQueryParameters struct {
 	// All members are immutable or values so copies of this struct are goroutine-safe.
-	version       string      `param:"sv"`
-	services      string      `param:"ss"`
-	resourceTypes string      `param:"srt"`
-	protocol      SASProtocol `param:"spr"`
-	startTime     time.Time   `param:"st"`
-	expiryTime    time.Time   `param:"se"`
-	ipRange       IPRange     `param:"sip"`
-	identifier    string      `param:"si"`
-	resource      string      `param:"sr"`
-	permissions   string      `param:"sp"`
-	signature     string      `param:"sig"`
+	version            string      `param:"sv"`
+	services           string      `param:"ss"`
+	resourceTypes      string      `param:"srt"`
+	protocol           SASProtocol `param:"spr"`
+	startTime          time.Time   `param:"st"`
+	expiryTime         time.Time   `param:"se"`
+	ipRange            IPRange     `param:"sip"`
+	identifier         string      `param:"si"`
+	resource           string      `param:"sr"`
+	permissions        string      `param:"sp"`
+	signature          string      `param:"sig"`
+	cacheControl       string      `param:"rscc"`
+	contentDisposition string      `param:"rscd"`
+	contentEncoding    string      `param:"rsce"`
+	contentLanguage    string      `param:"rscl"`
+	contentType        string      `param:"rsct"`
 }

 func (p *SASQueryParameters) Version() string {
@@ -99,6 +104,26 @@ func (p *SASQueryParameters) Signature() string {
 	return p.signature
 }

+func (p *SASQueryParameters) CacheControl() string {
+	return p.cacheControl
+}
+
+func (p *SASQueryParameters) ContentDisposition() string {
+	return p.contentDisposition
+}
+
+func (p *SASQueryParameters) ContentEncoding() string {
+	return p.contentEncoding
+}
+
+func (p *SASQueryParameters) ContentLanguage() string {
+	return p.contentLanguage
+}
+
+func (p *SASQueryParameters) ContentType() string {
+	return p.contentType
+}
+
 // IPRange represents a SAS IP range's start IP and (optionally) end IP.
 type IPRange struct {
 	Start net.IP // Not specified if length = 0
@@ -155,6 +180,16 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool
 			p.permissions = val
 		case "sig":
 			p.signature = val
+		case "rscc":
+			p.cacheControl = val
+		case "rscd":
+			p.contentDisposition = val
+		case "rsce":
+			p.contentEncoding = val
+		case "rscl":
+			p.contentLanguage = val
+		case "rsct":
+			p.contentType = val
 		default:
 			isSASKey = false // We didn't recognize the query parameter
 		}
@@ -200,6 +235,21 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
 	if p.signature != "" {
 		v.Add("sig", p.signature)
 	}
+	if p.cacheControl != "" {
+		v.Add("rscc", p.cacheControl)
+	}
+	if p.contentDisposition != "" {
+		v.Add("rscd", p.contentDisposition)
+	}
+	if p.contentEncoding != "" {
+		v.Add("rsce", p.contentEncoding)
+	}
+	if p.contentLanguage != "" {
+		v.Add("rscl", p.contentLanguage)
+	}
+	if p.contentType != "" {
+		v.Add("rsct", p.contentType)
+	}
 	return v
 }
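
The matching getters make the overrides readable on the parse side as well, for example when inspecting a user-supplied SAS URL. A minimal sketch, assuming this package's NewBlobURLParts; the helper itself is hypothetical:

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// describeSAS reports any response-header overrides carried by a SAS URL.
func describeSAS(sasURL string) error {
	u, err := url.Parse(sasURL)
	if err != nil {
		return err
	}
	parts := azblob.NewBlobURLParts(*u) // splits the URL, including its SAS query parameters
	if cd := parts.SAS.ContentDisposition(); cd != "" {
		fmt.Println("downloads will carry Content-Disposition:", cd)
	}
	if ct := parts.SAS.ContentType(); ct != "" {
		fmt.Println("downloads will carry Content-Type:", ct)
	}
	return nil
}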

View file

@@ -6,13 +6,14 @@ package azblob
 import (
 	"context"
 	"encoding/base64"
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
 	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // appendBlobClient is the client for the AppendBlob methods of the Azblob service.

View file

@@ -8,13 +8,14 @@ import (
 	"context"
 	"encoding/base64"
 	"encoding/xml"
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
 	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // blockBlobClient is the client for the BlockBlob methods of the Azblob service.

View file

@@ -4,8 +4,9 @@ package azblob
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.

 import (
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"net/url"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 const (

View file

@@ -7,13 +7,14 @@ import (
 	"bytes"
 	"context"
 	"encoding/xml"
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
 	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // containerClient is the client for the Container methods of the Azblob service.

View file

@@ -55,7 +55,7 @@ func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {

 // Marker represents an opaque value used in paged responses.
 type Marker struct {
-	val *string
+	Val *string
 }

 // NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true
@@ -63,14 +63,14 @@ type Marker struct {
 // the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only
 // after the service has returned the final result portion.
 func (m Marker) NotDone() bool {
-	return m.val == nil || *m.val != ""
+	return m.Val == nil || *m.Val != ""
 }

 // UnmarshalXML implements the xml.Unmarshaler interface for Marker.
 func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
 	var out string
 	err := d.DecodeElement(&out, &start)
-	m.val = &out
+	m.Val = &out
 	return err
 }
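
Exporting Val is what lets code outside the package construct a Marker from a persisted continuation token, rather than only threading NextMarker through a single loop. A minimal sketch; the helper and the checkpointing are hypothetical:

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// resumeListing picks up where a previous run stopped: savedToken is a
// NextMarker.Val value that the previous run persisted.
func resumeListing(ctx context.Context, containerURL azblob.ContainerURL, savedToken string) (*azblob.ListBlobsFlatSegmentResponse, error) {
	marker := azblob.Marker{Val: &savedToken}
	return containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
}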

View file

@@ -7,13 +7,14 @@ import (
 	"context"
 	"encoding/base64"
 	"encoding/xml"
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
 	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // pageBlobClient is the client for the PageBlob methods of the Azblob service.

View file

@@ -7,8 +7,9 @@ import (
 	"bytes"
 	"context"
 	"encoding/xml"
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"io/ioutil"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 type responder func(resp pipeline.Response) (result pipeline.Response, err error)

View file

@@ -6,9 +6,10 @@ package azblob
 import (
 	"bytes"
 	"fmt"
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"net"
 	"net/http"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // if you want to provide custom error handling set this variable to your constructor function

View file

@@ -7,12 +7,13 @@ import (
 	"bytes"
 	"context"
 	"encoding/xml"
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // serviceClient is the client for the Service methods of the Azblob service.

View file

@@ -5,10 +5,11 @@ package azblob

 import (
 	"fmt"
-	"github.com/Azure/azure-pipeline-go/pipeline"
 	"reflect"
 	"regexp"
 	"strings"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
 )

 // Constraint stores constraint name, target field name