forked from TrueCloudLab/distribution
Merge pull request #2603 from yuwaMSFT2/newazuresdk
closes #2496 and #2552
This commit is contained in:
commit
749f6afb45
76 changed files with 9932 additions and 2797 deletions
|
@ -87,7 +87,7 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) {
|
||||||
|
|
||||||
// Create registry container
|
// Create registry container
|
||||||
containerRef := blobClient.GetContainerReference(container)
|
containerRef := blobClient.GetContainerReference(container)
|
||||||
if _, err = containerRef.CreateIfNotExists(); err != nil {
|
if _, err = containerRef.CreateIfNotExists(nil); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -104,7 +104,8 @@ func (d *driver) Name() string {
|
||||||
|
|
||||||
// GetContent retrieves the content stored at "path" as a []byte.
|
// GetContent retrieves the content stored at "path" as a []byte.
|
||||||
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
|
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
|
||||||
blob, err := d.client.GetBlob(d.container, path)
|
blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path)
|
||||||
|
blob, err := blobRef.Get(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if is404(err) {
|
if is404(err) {
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
||||||
|
@ -118,7 +119,10 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
|
||||||
|
|
||||||
// PutContent stores the []byte content at a location designated by "path".
|
// PutContent stores the []byte content at a location designated by "path".
|
||||||
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
|
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
|
||||||
if limit := 64 * 1024 * 1024; len(contents) > limit { // max size for block blobs uploaded via single "Put Blob"
|
// max size for block blobs uploaded via single "Put Blob" for version after "2016-05-31"
|
||||||
|
// https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#remarks
|
||||||
|
const limit = 256 * 1024 * 1024
|
||||||
|
if len(contents) > limit {
|
||||||
return fmt.Errorf("uploading %d bytes with PutContent is not supported; limit: %d bytes", len(contents), limit)
|
return fmt.Errorf("uploading %d bytes with PutContent is not supported; limit: %d bytes", len(contents), limit)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -133,41 +137,49 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e
|
||||||
// losing the existing data while migrating it to BlockBlob type. However,
|
// losing the existing data while migrating it to BlockBlob type. However,
|
||||||
// expectation is the clients pushing will be retrying when they get an error
|
// expectation is the clients pushing will be retrying when they get an error
|
||||||
// response.
|
// response.
|
||||||
props, err := d.client.GetBlobProperties(d.container, path)
|
blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path)
|
||||||
|
err := blobRef.GetProperties(nil)
|
||||||
if err != nil && !is404(err) {
|
if err != nil && !is404(err) {
|
||||||
return fmt.Errorf("failed to get blob properties: %v", err)
|
return fmt.Errorf("failed to get blob properties: %v", err)
|
||||||
}
|
}
|
||||||
if err == nil && props.BlobType != azure.BlobTypeBlock {
|
if err == nil && blobRef.Properties.BlobType != azure.BlobTypeBlock {
|
||||||
if err := d.client.DeleteBlob(d.container, path, nil); err != nil {
|
if err := blobRef.Delete(nil); err != nil {
|
||||||
return fmt.Errorf("failed to delete legacy blob (%s): %v", props.BlobType, err)
|
return fmt.Errorf("failed to delete legacy blob (%s): %v", blobRef.Properties.BlobType, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
r := bytes.NewReader(contents)
|
r := bytes.NewReader(contents)
|
||||||
return d.client.CreateBlockBlobFromReader(d.container, path, uint64(len(contents)), r, nil)
|
// reset properties to empty before doing overwrite
|
||||||
|
blobRef.Properties = azure.BlobProperties{}
|
||||||
|
return blobRef.CreateBlockBlobFromReader(r, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
|
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
|
||||||
// given byte offset.
|
// given byte offset.
|
||||||
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
|
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
|
||||||
if ok, err := d.client.BlobExists(d.container, path); err != nil {
|
blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path)
|
||||||
|
if ok, err := blobRef.Exists(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
} else if !ok {
|
} else if !ok {
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := d.client.GetBlobProperties(d.container, path)
|
err := blobRef.GetProperties(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
info := blobRef.Properties
|
||||||
size := int64(info.ContentLength)
|
size := int64(info.ContentLength)
|
||||||
if offset >= size {
|
if offset >= size {
|
||||||
return ioutil.NopCloser(bytes.NewReader(nil)), nil
|
return ioutil.NopCloser(bytes.NewReader(nil)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
bytesRange := fmt.Sprintf("%v-", offset)
|
resp, err := blobRef.GetRange(&azure.GetBlobRangeOptions{
|
||||||
resp, err := d.client.GetBlobRange(d.container, path, bytesRange, nil)
|
Range: &azure.BlobRange{
|
||||||
|
Start: uint64(offset),
|
||||||
|
End: 0,
|
||||||
|
},
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -177,20 +189,22 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
|
||||||
// Writer returns a FileWriter which will store the content written to it
|
// Writer returns a FileWriter which will store the content written to it
|
||||||
// at the location designated by "path" after the call to Commit.
|
// at the location designated by "path" after the call to Commit.
|
||||||
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
|
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
|
||||||
blobExists, err := d.client.BlobExists(d.container, path)
|
blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path)
|
||||||
|
blobExists, err := blobRef.Exists()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var size int64
|
var size int64
|
||||||
if blobExists {
|
if blobExists {
|
||||||
if append {
|
if append {
|
||||||
blobProperties, err := d.client.GetBlobProperties(d.container, path)
|
err = blobRef.GetProperties(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
blobProperties := blobRef.Properties
|
||||||
size = blobProperties.ContentLength
|
size = blobProperties.ContentLength
|
||||||
} else {
|
} else {
|
||||||
err := d.client.DeleteBlob(d.container, path, nil)
|
err = blobRef.Delete(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -199,7 +213,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
|
||||||
if append {
|
if append {
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
||||||
}
|
}
|
||||||
err := d.client.PutAppendBlob(d.container, path, nil)
|
err = blobRef.PutAppendBlob(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -211,24 +225,21 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
|
||||||
// Stat retrieves the FileInfo for the given path, including the current size
|
// Stat retrieves the FileInfo for the given path, including the current size
|
||||||
// in bytes and the creation time.
|
// in bytes and the creation time.
|
||||||
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
|
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
|
||||||
|
blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path)
|
||||||
// Check if the path is a blob
|
// Check if the path is a blob
|
||||||
if ok, err := d.client.BlobExists(d.container, path); err != nil {
|
if ok, err := blobRef.Exists(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
} else if ok {
|
} else if ok {
|
||||||
blob, err := d.client.GetBlobProperties(d.container, path)
|
err = blobRef.GetProperties(nil)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
mtim, err := time.Parse(http.TimeFormat, blob.LastModified)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
blobProperties := blobRef.Properties
|
||||||
|
|
||||||
return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
|
return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
|
||||||
Path: path,
|
Path: path,
|
||||||
Size: int64(blob.ContentLength),
|
Size: int64(blobProperties.ContentLength),
|
||||||
ModTime: mtim,
|
ModTime: time.Time(blobProperties.LastModified),
|
||||||
IsDir: false,
|
IsDir: false,
|
||||||
}}, nil
|
}}, nil
|
||||||
}
|
}
|
||||||
|
@ -281,8 +292,10 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
|
||||||
// Move moves an object stored at sourcePath to destPath, removing the original
|
// Move moves an object stored at sourcePath to destPath, removing the original
|
||||||
// object.
|
// object.
|
||||||
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
|
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
|
||||||
sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath)
|
srcBlobRef := d.client.GetContainerReference(d.container).GetBlobReference(sourcePath)
|
||||||
err := d.client.CopyBlob(d.container, destPath, sourceBlobURL)
|
sourceBlobURL := srcBlobRef.GetURL()
|
||||||
|
destBlobRef := d.client.GetContainerReference(d.container).GetBlobReference(destPath)
|
||||||
|
err := destBlobRef.Copy(sourceBlobURL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if is404(err) {
|
if is404(err) {
|
||||||
return storagedriver.PathNotFoundError{Path: sourcePath}
|
return storagedriver.PathNotFoundError{Path: sourcePath}
|
||||||
|
@ -290,12 +303,13 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return d.client.DeleteBlob(d.container, sourcePath, nil)
|
return srcBlobRef.Delete(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
||||||
func (d *driver) Delete(ctx context.Context, path string) error {
|
func (d *driver) Delete(ctx context.Context, path string) error {
|
||||||
ok, err := d.client.DeleteBlobIfExists(d.container, path, nil)
|
blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path)
|
||||||
|
ok, err := blobRef.DeleteIfExists(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -310,7 +324,8 @@ func (d *driver) Delete(ctx context.Context, path string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, b := range blobs {
|
for _, b := range blobs {
|
||||||
if err = d.client.DeleteBlob(d.container, b, nil); err != nil {
|
blobRef = d.client.GetContainerReference(d.container).GetBlobReference(b)
|
||||||
|
if err = blobRef.Delete(nil); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -333,7 +348,15 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
|
||||||
expiresTime = t
|
expiresTime = t
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return d.client.GetBlobSASURI(d.container, path, expiresTime, "r")
|
blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path)
|
||||||
|
return blobRef.GetSASURI(azure.BlobSASOptions{
|
||||||
|
BlobServiceSASPermissions: azure.BlobServiceSASPermissions{
|
||||||
|
Read: true,
|
||||||
|
},
|
||||||
|
SASOptions: azure.SASOptions{
|
||||||
|
Expiry: expiresTime,
|
||||||
|
},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Walk traverses a filesystem defined within driver, starting
|
// Walk traverses a filesystem defined within driver, starting
|
||||||
|
@ -467,7 +490,8 @@ func (w *writer) Cancel() error {
|
||||||
return fmt.Errorf("already committed")
|
return fmt.Errorf("already committed")
|
||||||
}
|
}
|
||||||
w.cancelled = true
|
w.cancelled = true
|
||||||
return w.driver.client.DeleteBlob(w.driver.container, w.path, nil)
|
blobRef := w.driver.client.GetContainerReference(w.driver.container).GetBlobReference(w.path)
|
||||||
|
return blobRef.Delete(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *writer) Commit() error {
|
func (w *writer) Commit() error {
|
||||||
|
@ -490,12 +514,13 @@ type blockWriter struct {
|
||||||
|
|
||||||
func (bw *blockWriter) Write(p []byte) (int, error) {
|
func (bw *blockWriter) Write(p []byte) (int, error) {
|
||||||
n := 0
|
n := 0
|
||||||
|
blobRef := bw.client.GetContainerReference(bw.container).GetBlobReference(bw.path)
|
||||||
for offset := 0; offset < len(p); offset += maxChunkSize {
|
for offset := 0; offset < len(p); offset += maxChunkSize {
|
||||||
chunkSize := maxChunkSize
|
chunkSize := maxChunkSize
|
||||||
if offset+chunkSize > len(p) {
|
if offset+chunkSize > len(p) {
|
||||||
chunkSize = len(p) - offset
|
chunkSize = len(p) - offset
|
||||||
}
|
}
|
||||||
err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize], nil)
|
err := blobRef.AppendBlock(p[offset:offset+chunkSize], nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
|
github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b
|
||||||
github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
|
github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052
|
||||||
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
|
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
|
||||||
github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd
|
github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd
|
||||||
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
|
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
|
||||||
|
@ -19,6 +19,8 @@ github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
|
||||||
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
|
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
|
||||||
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||||
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
||||||
|
github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f
|
||||||
|
github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
|
||||||
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
|
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||||
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
|
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
|
||||||
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
|
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
|
||||||
|
|
5
vendor/github.com/Azure/azure-sdk-for-go/NOTICE
generated
vendored
Normal file
5
vendor/github.com/Azure/azure-sdk-for-go/NOTICE
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
Microsoft Azure-SDK-for-Go
|
||||||
|
Copyright 2014-2017 Microsoft
|
||||||
|
|
||||||
|
This product includes software developed at
|
||||||
|
the Microsoft Corporation (https://www.microsoft.com).
|
321
vendor/github.com/Azure/azure-sdk-for-go/README.md
generated
vendored
321
vendor/github.com/Azure/azure-sdk-for-go/README.md
generated
vendored
|
@ -1,60 +1,301 @@
|
||||||
# Microsoft Azure SDK for Go
|
# Azure SDK for Go
|
||||||
[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go)
|
|
||||||
[![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go)
|
[![godoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go)
|
||||||
|
[![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go)
|
||||||
[![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-sdk-for-go)](https://goreportcard.com/report/github.com/Azure/azure-sdk-for-go)
|
[![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-sdk-for-go)](https://goreportcard.com/report/github.com/Azure/azure-sdk-for-go)
|
||||||
|
|
||||||
|
azure-sdk-for-go provides Go packages for managing and using Azure services. It has been
|
||||||
|
tested with Go 1.8, 1.9 and 1.10.
|
||||||
|
|
||||||
This is Microsoft Azure's core repository for hosting Go packages which offer a more convenient way of targeting Azure
|
To be notified about updates and changes, subscribe to the [Azure update
|
||||||
REST endpoints. Here, you'll find a mix of code generated by [Autorest](https://github.com/Azure/autorest) and hand
|
feed](https://azure.microsoft.com/updates/).
|
||||||
maintained packages.
|
|
||||||
|
|
||||||
> **NOTE:** This repository is under heavy ongoing development and should be considered a preview. Vendoring your
|
Users of the SDK may prefer to jump right in to our samples repo at
|
||||||
dependencies is always a good idea, but it is doubly important if you're consuming this library.
|
[github.com/Azure-Samples/azure-sdk-for-go-samples][samples_repo].
|
||||||
|
|
||||||
# Installation
|
### Build Details
|
||||||
- If you don't already have it, install [the Go Programming Language](https://golang.org/dl/).
|
|
||||||
- Go get the SDK:
|
|
||||||
|
|
||||||
```
|
Most packages in the SDK are generated from [Azure API specs][azure_rest_specs]
|
||||||
$ go get -u github.com/Azure/azure-sdk-for-go
|
using [Azure/autorest.go][] and [Azure/autorest][]. These generated packages
|
||||||
|
depend on the HTTP client implemented at [Azure/go-autorest][].
|
||||||
|
|
||||||
|
[azure_rest_specs]: https://github.com/Azure/azure-rest-api-specs
|
||||||
|
[Azure/autorest]: https://github.com/Azure/autorest
|
||||||
|
[Azure/autorest.go]: https://github.com/Azure/autorest.go
|
||||||
|
[Azure/go-autorest]: https://github.com/Azure/go-autorest
|
||||||
|
|
||||||
|
The SDK codebase adheres to [semantic versioning](https://semver.org) and thus
|
||||||
|
avoids breaking changes other than at major (x.0.0) releases. However,
|
||||||
|
occasionally Azure API fixes require breaking updates within an individual
|
||||||
|
package; these exceptions are noted in release changelogs.
|
||||||
|
|
||||||
|
To more reliably manage dependencies like the Azure SDK in your applications we
|
||||||
|
recommend [golang/dep](https://github.com/golang/dep).
|
||||||
|
|
||||||
|
# Install and Use:
|
||||||
|
|
||||||
|
### Install
|
||||||
|
|
||||||
|
```sh
|
||||||
|
$ go get -u github.com/Azure/azure-sdk-for-go/...
|
||||||
```
|
```
|
||||||
|
|
||||||
> **IMPORTANT:** We highly suggest vendoring Azure SDK for Go as a dependency. For vendoring dependencies, Azure SDK
|
or if you use dep, within your repo run:
|
||||||
for Go uses [glide](https://github.com/Masterminds/glide).
|
|
||||||
|
```sh
|
||||||
|
$ dep ensure -add github.com/Azure/azure-sdk-for-go
|
||||||
|
```
|
||||||
|
|
||||||
|
If you need to install Go, follow [the official instructions](https://golang.org/dl/).
|
||||||
|
|
||||||
|
### Use
|
||||||
|
|
||||||
|
For complete examples of many scenarios see [Azure-Samples/azure-sdk-for-go-samples][samples_repo].
|
||||||
|
|
||||||
|
1. Import a package from the [services][services_dir] directory.
|
||||||
|
2. Create and authenticate a client with a `New*Client` func, e.g.
|
||||||
|
`c := compute.NewVirtualMachinesClient(...)`.
|
||||||
|
3. Invoke API methods using the client, e.g. `c.CreateOrUpdate(...)`.
|
||||||
|
4. Handle responses.
|
||||||
|
|
||||||
|
[services_dir]: https://github.com/Azure/azure-sdk-for-go/tree/master/services
|
||||||
|
|
||||||
|
For example, to create a new virtual network (substitute your own values for
|
||||||
|
strings in angle brackets):
|
||||||
|
|
||||||
|
Note: For more on authentication and the `Authorizer` interface see [the next
|
||||||
|
section](#authentication).
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||||
|
"github.com/Azure/go-autorest/autorest/azure/auth"
|
||||||
|
"github.com/Azure/go-autorest/autorest/to"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
vnetClient := network.NewVirtualNetworksClient("<subscriptionID>")
|
||||||
|
authorizer, err := auth.NewAuthorizerFromEnvironment()
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
vnetClient.Authorizer = authorizer
|
||||||
|
}
|
||||||
|
|
||||||
|
vnetClient.CreateOrUpdate(context.Background(),
|
||||||
|
"<resourceGroupName>",
|
||||||
|
"<vnetName>",
|
||||||
|
network.VirtualNetwork{
|
||||||
|
Location: to.StringPtr("<azureRegion>"),
|
||||||
|
VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{
|
||||||
|
AddressSpace: &network.AddressSpace{
|
||||||
|
AddressPrefixes: &[]string{"10.0.0.0/8"},
|
||||||
|
},
|
||||||
|
Subnets: &[]network.Subnet{
|
||||||
|
{
|
||||||
|
Name: to.StringPtr("<subnet1Name>"),
|
||||||
|
SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
|
||||||
|
AddressPrefix: to.StringPtr("10.0.0.0/16"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: to.StringPtr("<subnet2Name>"),
|
||||||
|
SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
|
||||||
|
AddressPrefix: to.StringPtr("10.1.0.0/16"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Authentication
|
||||||
|
|
||||||
|
Most SDK operations require an OAuth token for authentication and authorization. These are
|
||||||
|
made available in the Go SDK For Azure through types implementing the `Authorizer` interface.
|
||||||
|
You can get one from Azure Active Directory using the SDK's
|
||||||
|
[authentication](https://godoc.org/github.com/Azure/go-autorest/autorest/azure/auth) package. The `Authorizer` returned should
|
||||||
|
be set as the authorizer for the resource client, as shown in the [previous section](#use).
|
||||||
|
|
||||||
|
You can get an authorizer in the following ways:
|
||||||
|
1. From the **Environment**:
|
||||||
|
- Use `auth.auth.NewAuthorizerFromEnvironment()`. This call will try to get an authorizer based on the environment
|
||||||
|
variables with different types of credentials in the following order:
|
||||||
|
1. **Client Credentials**: Uses the AAD App Secret for auth.
|
||||||
|
- `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||||
|
- `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||||
|
- `AZURE_CLIENT_SECRET`: Specifies the app secret to use.
|
||||||
|
2. **Client Certificate**: Uses a certificate that was configured on the AAD Service Principal.
|
||||||
|
- `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||||
|
- `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||||
|
- `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use.
|
||||||
|
- `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use.
|
||||||
|
3. **Username Pasword**: Uses a username and a password for auth. This is not recommended. Use `Device Flow` Auth instead for user interactive acccess.
|
||||||
|
- `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
|
||||||
|
- `AZURE_CLIENT_ID`: Specifies the app client ID to use.
|
||||||
|
- `AZURE_USERNAME`: Specifies the username to use.
|
||||||
|
- `AZURE_PASSWORD`: Specifies the password to use.
|
||||||
|
4. **MSI**: Only available for apps running in Azure. No configuration needed as it leverages the fact that the app is running in Azure. See [Azure Managed Service Identity](https://docs.microsoft.com/en-us/azure/active-directory/msi-overview).
|
||||||
|
|
||||||
|
- Optionally, the following environment variables can be defined:
|
||||||
|
- `AZURE_ENVIRONMENT`: Specifies the Azure Environment to use. If not set, it defaults to `AzurePublicCloud`. (Not applicable to MSI based auth)
|
||||||
|
- `AZURE_AD_RESOURCE`: Specifies the AAD resource ID to use. If not set, it defaults to `ResourceManagerEndpoint`which allows management operations against Azure Resource Manager.
|
||||||
|
|
||||||
|
2. From an **Auth File**:
|
||||||
|
- Create a service principal and output the file content using `az ad sp create-for-rbac --sdk-auth` from the Azure CLI.For more details see [az ad sp](https://docs.microsoft.com/en-us/cli/azure/ad/sp).
|
||||||
|
- Set environment variable `AZURE_AUTH_LOCATION` for finding the file.
|
||||||
|
- Use `auth.NewAuthorizerFromFile()` for getting the `Authorizer` based on the auth file.
|
||||||
|
|
||||||
|
3. From **Device Flow** by configuring `auth.DeviceFlowConfig` and calling the `Authorizer()` method.
|
||||||
|
|
||||||
|
Note: To authenticate you first need to create a service principal in Azure. To create a new service principal, run
|
||||||
|
`az ad sp create-for-rbac -n "<app_name>"` in the
|
||||||
|
[azure-cli](https://github.com/Azure/azure-cli). See
|
||||||
|
[these docs](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli?view=azure-cli-latest)
|
||||||
|
for more info. Copy the new principal's ID, secret, and tenant ID for use in your app.
|
||||||
|
|
||||||
|
Alternatively, if your apps are running in Azure, you can now leverage the [Managed Service Identity](https://docs.microsoft.com/en-us/azure/active-directory/msi-overview).
|
||||||
|
|
||||||
# Versioning
|
# Versioning
|
||||||
## SDK Versions
|
|
||||||
The tags in this repository are based on, but do not conform to [SemVer.org's recommendations](http://semver.org/).
|
|
||||||
For now, the "-beta" tag is an indicator that we are still in preview and still are planning on releasing some breaking
|
|
||||||
changes.
|
|
||||||
|
|
||||||
In repositories that are children of this one, [storage for example](https://github.com/Azure/azure-storage-go), we
|
azure-sdk-for-go provides at least a basic Go binding for every Azure API. To
|
||||||
have adopted SemVer.org's recommendations.
|
provide maximum flexibility to users, the SDK even includes previous versions of
|
||||||
|
Azure APIs which are still in use. This enables us to support users of the
|
||||||
|
most updated Azure datacenters, regional datacenters with earlier APIs, and
|
||||||
|
even on-premises installations of Azure Stack.
|
||||||
|
|
||||||
## Azure Versions
|
**SDK versions** apply globally and are tracked by git
|
||||||
Azure services _mostly_ do not use SemVer based versions. Rather, they use profiles identified by dates. One will often
|
[tags](https://github.com/Azure/azure-sdk-for-go/tags). These are in x.y.z form
|
||||||
see this casually referred to as an "APIVersion". At the moment, our SDK only supports the most recent profiles. In
|
and generally adhere to [semantic versioning](https://semver.org) specifications.
|
||||||
order to lock to an API version, one must also lock to an SDK version. However, as discussed in
|
|
||||||
[#517](https://github.com/Azure/azure-sdk-for-go/issues/517), our objective is to reorganize and publish independent
|
|
||||||
packages for each profile. In that way, we'll be able to have parallel support in a single SDK version for all
|
|
||||||
APIVersions supported by Azure.
|
|
||||||
|
|
||||||
# Documentation
|
**Service API versions** are generally represented by a date string and are
|
||||||
|
tracked by offering separate packages for each version. For example, to choose the
|
||||||
|
latest API versions for Compute and Network, use the following imports:
|
||||||
|
|
||||||
- Azure SDK for Go Documentation is available at [GoDoc.org](http://godoc.org/github.com/Azure/azure-sdk-for-go/).
|
```go
|
||||||
- Azure REST APIs used by packages in this repository are documented at [Microsoft Docs, Azure REST](https://docs.microsoft.com/en-us/rest/api/).
|
import (
|
||||||
- Azure Services are discussed in detail at [Microsoft Docs, Azure Services](https://docs.microsoft.com/en-us/azure/#pivot=services).
|
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||||
|
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
# License
|
Occasionally service-side changes require major changes to existing versions.
|
||||||
|
These cases are noted in the changelog.
|
||||||
|
|
||||||
This project is published under [Apache 2.0 License](LICENSE).
|
All avilable services and versions are listed under the `services/` path in
|
||||||
|
this repo and in [GoDoc][services_godoc]. Run `find ./services -type d
|
||||||
|
-mindepth 3` to list all available service packages.
|
||||||
|
|
||||||
|
[services_godoc]: https://godoc.org/github.com/Azure/azure-sdk-for-go/services
|
||||||
|
|
||||||
# Contribute
|
### Profiles
|
||||||
|
|
||||||
If you would like to become an active contributor to this project please follow the instructions provided in [Microsoft
|
Azure **API profiles** specify subsets of Azure APIs and versions. Profiles can provide:
|
||||||
Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/).
|
|
||||||
|
* **stability** for your application by locking to specific API versions; and/or
|
||||||
|
* **compatibility** for your application with Azure Stack and regional Azure datacenters.
|
||||||
|
|
||||||
|
In the Go SDK, profiles are available under the `profiles/` path and their
|
||||||
|
component API versions are aliases to the true service package under
|
||||||
|
`services/`. You can use them as follows:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/compute/mgmt/compute"
|
||||||
|
import "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/network/mgmt/network"
|
||||||
|
import "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage"
|
||||||
|
```
|
||||||
|
|
||||||
|
The 2017-03-09 profile is the only one currently available and is for use in
|
||||||
|
hybrid Azure and Azure Stack environments. More profiles are under development.
|
||||||
|
|
||||||
|
In addition to versioned profiles, we also provide two special profiles
|
||||||
|
`latest` and `preview`. These *always* include the most recent respective stable or
|
||||||
|
preview API versions for each service, even when updating them to do so causes
|
||||||
|
breaking changes. That is, these do *not* adhere to semantic versioning rules.
|
||||||
|
|
||||||
|
The `latest` and `preview` profiles can help you stay up to date with API
|
||||||
|
updates as you build applications. Since they are by definition not stable,
|
||||||
|
however, they **should not** be used in production apps. Instead, choose the
|
||||||
|
latest specific API version (or an older one if necessary) from the `services/`
|
||||||
|
path.
|
||||||
|
|
||||||
|
As an example, to automatically use the most recent Compute APIs, use one of
|
||||||
|
the following imports:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
|
||||||
|
import "github.com/Azure/azure-sdk-for-go/profiles/preview/compute/mgmt/compute"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Inspecting and Debugging
|
||||||
|
|
||||||
|
All clients implement some handy hooks to help inspect the underlying requests being made to Azure.
|
||||||
|
|
||||||
|
- `RequestInspector`: View and manipulate the go `http.Request` before it's sent
|
||||||
|
- `ResponseInspector`: View the `http.Response` received
|
||||||
|
|
||||||
|
Here is an example of how these can be used with `net/http/httputil` to see requests and responses.
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
vnetClient := network.NewVirtualNetworksClient("<subscriptionID>")
|
||||||
|
vnetClient.RequestInspector = LogRequest()
|
||||||
|
vnetClient.ResponseInspector = LogResponse()
|
||||||
|
|
||||||
|
...
|
||||||
|
|
||||||
|
func LogRequest() autorest.PrepareDecorator {
|
||||||
|
return func(p autorest.Preparer) autorest.Preparer {
|
||||||
|
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||||
|
r, err := p.Prepare(r)
|
||||||
|
if err != nil {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
dump, _ := httputil.DumpRequestOut(r, true)
|
||||||
|
log.Println(string(dump))
|
||||||
|
return r, err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func LogResponse() autorest.RespondDecorator {
|
||||||
|
return func(p autorest.Responder) autorest.Responder {
|
||||||
|
return autorest.ResponderFunc(func(r *http.Response) error {
|
||||||
|
err := p.Respond(r)
|
||||||
|
if err != nil {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
dump, _ := httputil.DumpResponse(r, true)
|
||||||
|
log.Println(string(dump))
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
# Resources
|
||||||
|
|
||||||
|
- SDK docs are at [godoc.org](https://godoc.org/github.com/Azure/azure-sdk-for-go/).
|
||||||
|
- SDK samples are at [Azure-Samples/azure-sdk-for-go-samples](https://github.com/Azure-Samples/azure-sdk-for-go-samples).
|
||||||
|
- SDK notifications are published via the [Azure update feed](https://azure.microsoft.com/updates/).
|
||||||
|
- Azure API docs are at [docs.microsoft.com/rest/api](https://docs.microsoft.com/rest/api/).
|
||||||
|
- General Azure docs are at [docs.microsoft.com/azure](https://docs.microsoft.com/azure).
|
||||||
|
|
||||||
|
### Other Azure packages for Go
|
||||||
|
|
||||||
|
- [Azure Storage Blobs](https://azure.microsoft.com/services/storage/blobs) - [github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go)
|
||||||
|
- [Azure Applications Insights](https://azure.microsoft.com/en-us/services/application-insights/) - [github.com/Microsoft/ApplicationInsights-Go](https://github.com/Microsoft/ApplicationInsights-Go)
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Apache 2.0, see [LICENSE](./LICENSE).
|
||||||
|
|
||||||
|
## Contribute
|
||||||
|
|
||||||
|
See [CONTRIBUTING.md](./CONTRIBUTING.md).
|
||||||
|
|
||||||
|
[samples_repo]: https://github.com/Azure-Samples/azure-sdk-for-go-samples
|
||||||
|
|
||||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
|
||||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
|
|
||||||
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
|
||||||
|
|
18
vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
generated
vendored
Normal file
18
vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
# Azure Storage SDK for Go (Preview)
|
||||||
|
|
||||||
|
:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the
|
||||||
|
future. Consider using the new package for blobs currently in preview at
|
||||||
|
[github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go).
|
||||||
|
New Table, Queue and File packages are also in development.
|
||||||
|
|
||||||
|
The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage
|
||||||
|
[Azure Storage](https://docs.microsoft.com/en-us/azure/storage/) data plane
|
||||||
|
resources: containers, blobs, tables, and queues.
|
||||||
|
|
||||||
|
To manage storage *accounts* use Azure Resource Manager (ARM) via the packages
|
||||||
|
at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/services/storage).
|
||||||
|
|
||||||
|
This package also supports the [Azure Storage
|
||||||
|
Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
|
||||||
|
(Windows only).
|
||||||
|
|
91
vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
generated
vendored
Normal file
91
vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/md5"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PutAppendBlob initializes an empty append blob with specified name. An
|
||||||
|
// append blob must be created using this method before appending blocks.
|
||||||
|
//
|
||||||
|
// See CreateBlockBlobFromReader for more info on creating blobs.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
|
||||||
|
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
||||||
|
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return b.respondCreation(resp, BlobTypeAppend)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendBlockOptions includes the options for an append block operation
|
||||||
|
type AppendBlockOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
MaxSize *uint `header:"x-ms-blob-condition-maxsize"`
|
||||||
|
AppendPosition *uint `header:"x-ms-blob-condition-appendpos"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
IfMatch string `header:"If-Match"`
|
||||||
|
IfNoneMatch string `header:"If-None-Match"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
ContentMD5 bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendBlock appends a block to an append blob.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
|
||||||
|
func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
|
||||||
|
params := url.Values{"comp": {"appendblock"}}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
||||||
|
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
if options.ContentMD5 {
|
||||||
|
md5sum := md5.Sum(chunk)
|
||||||
|
headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return b.respondCreation(resp, BlobTypeAppend)
|
||||||
|
}
|
67
vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
generated
vendored
67
vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
generated
vendored
|
@ -1,6 +1,20 @@
|
||||||
// Package storage provides clients for Microsoft Azure Storage Services.
|
// Package storage provides clients for Microsoft Azure Storage Services.
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
@ -20,33 +34,39 @@ const (
|
||||||
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
|
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
|
||||||
|
|
||||||
// headers
|
// headers
|
||||||
headerAuthorization = "Authorization"
|
headerAcceptCharset = "Accept-Charset"
|
||||||
headerContentLength = "Content-Length"
|
headerAuthorization = "Authorization"
|
||||||
headerDate = "Date"
|
headerContentLength = "Content-Length"
|
||||||
headerXmsDate = "x-ms-date"
|
headerDate = "Date"
|
||||||
headerXmsVersion = "x-ms-version"
|
headerXmsDate = "x-ms-date"
|
||||||
headerContentEncoding = "Content-Encoding"
|
headerXmsVersion = "x-ms-version"
|
||||||
headerContentLanguage = "Content-Language"
|
headerContentEncoding = "Content-Encoding"
|
||||||
headerContentType = "Content-Type"
|
headerContentLanguage = "Content-Language"
|
||||||
headerContentMD5 = "Content-MD5"
|
headerContentType = "Content-Type"
|
||||||
headerIfModifiedSince = "If-Modified-Since"
|
headerContentMD5 = "Content-MD5"
|
||||||
headerIfMatch = "If-Match"
|
headerIfModifiedSince = "If-Modified-Since"
|
||||||
headerIfNoneMatch = "If-None-Match"
|
headerIfMatch = "If-Match"
|
||||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
headerIfNoneMatch = "If-None-Match"
|
||||||
headerRange = "Range"
|
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
||||||
|
headerRange = "Range"
|
||||||
|
headerDataServiceVersion = "DataServiceVersion"
|
||||||
|
headerMaxDataServiceVersion = "MaxDataServiceVersion"
|
||||||
|
headerContentTransferEncoding = "Content-Transfer-Encoding"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
|
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
|
||||||
authHeader, err := c.getSharedKey(verb, url, headers, auth)
|
if !c.sasClient {
|
||||||
if err != nil {
|
authHeader, err := c.getSharedKey(verb, url, headers, auth)
|
||||||
return nil, err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
headers[headerAuthorization] = authHeader
|
||||||
}
|
}
|
||||||
headers[headerAuthorization] = authHeader
|
|
||||||
return headers, nil
|
return headers, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
|
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
|
||||||
canRes, err := c.buildCanonicalizedResource(url, auth)
|
canRes, err := c.buildCanonicalizedResource(url, auth, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
@ -58,15 +78,18 @@ func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth
|
||||||
return c.createAuthorizationHeader(canString, auth), nil
|
return c.createAuthorizationHeader(canString, auth), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) {
|
func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
|
||||||
errMsg := "buildCanonicalizedResource error: %s"
|
errMsg := "buildCanonicalizedResource error: %s"
|
||||||
u, err := url.Parse(uri)
|
u, err := url.Parse(uri)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf(errMsg, err.Error())
|
return "", fmt.Errorf(errMsg, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
cr := bytes.NewBufferString("/")
|
cr := bytes.NewBufferString("")
|
||||||
cr.WriteString(c.getCanonicalizedAccountName())
|
if c.accountName != StorageEmulatorAccountName || !sas {
|
||||||
|
cr.WriteString("/")
|
||||||
|
cr.WriteString(c.getCanonicalizedAccountName())
|
||||||
|
}
|
||||||
|
|
||||||
if len(u.Path) > 0 {
|
if len(u.Path) > 0 {
|
||||||
// Any portion of the CanonicalizedResource string that is derived from
|
// Any portion of the CanonicalizedResource string that is derived from
|
||||||
|
|
1252
vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
generated
vendored
1252
vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
generated
vendored
File diff suppressed because it is too large
Load diff
174
vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
generated
vendored
Normal file
174
vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
generated
vendored
Normal file
|
@ -0,0 +1,174 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OverrideHeaders defines overridable response heaedrs in
|
||||||
|
// a request using a SAS URI.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||||
|
type OverrideHeaders struct {
|
||||||
|
CacheControl string
|
||||||
|
ContentDisposition string
|
||||||
|
ContentEncoding string
|
||||||
|
ContentLanguage string
|
||||||
|
ContentType string
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobSASOptions are options to construct a blob SAS
|
||||||
|
// URI.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||||
|
type BlobSASOptions struct {
|
||||||
|
BlobServiceSASPermissions
|
||||||
|
OverrideHeaders
|
||||||
|
SASOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobServiceSASPermissions includes the available permissions for
|
||||||
|
// blob service SAS URI.
|
||||||
|
type BlobServiceSASPermissions struct {
|
||||||
|
Read bool
|
||||||
|
Add bool
|
||||||
|
Create bool
|
||||||
|
Write bool
|
||||||
|
Delete bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p BlobServiceSASPermissions) buildString() string {
|
||||||
|
permissions := ""
|
||||||
|
if p.Read {
|
||||||
|
permissions += "r"
|
||||||
|
}
|
||||||
|
if p.Add {
|
||||||
|
permissions += "a"
|
||||||
|
}
|
||||||
|
if p.Create {
|
||||||
|
permissions += "c"
|
||||||
|
}
|
||||||
|
if p.Write {
|
||||||
|
permissions += "w"
|
||||||
|
}
|
||||||
|
if p.Delete {
|
||||||
|
permissions += "d"
|
||||||
|
}
|
||||||
|
return permissions
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSASURI creates an URL to the blob which contains the Shared
|
||||||
|
// Access Signature with the specified options.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||||
|
func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) {
|
||||||
|
uri := b.GetURL()
|
||||||
|
signedResource := "b"
|
||||||
|
canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
permissions := options.BlobServiceSASPermissions.buildString()
|
||||||
|
return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
|
||||||
|
start := ""
|
||||||
|
if options.Start != (time.Time{}) {
|
||||||
|
start = options.Start.UTC().Format(time.RFC3339)
|
||||||
|
}
|
||||||
|
|
||||||
|
expiry := options.Expiry.UTC().Format(time.RFC3339)
|
||||||
|
|
||||||
|
// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
|
||||||
|
canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
|
||||||
|
canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
protocols := ""
|
||||||
|
if options.UseHTTPS {
|
||||||
|
protocols = "https"
|
||||||
|
}
|
||||||
|
stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
sig := c.computeHmac256(stringToSign)
|
||||||
|
sasParams := url.Values{
|
||||||
|
"sv": {c.apiVersion},
|
||||||
|
"se": {expiry},
|
||||||
|
"sr": {signedResource},
|
||||||
|
"sp": {permissions},
|
||||||
|
"sig": {sig},
|
||||||
|
}
|
||||||
|
|
||||||
|
if start != "" {
|
||||||
|
sasParams.Add("st", start)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.apiVersion >= "2015-04-05" {
|
||||||
|
if protocols != "" {
|
||||||
|
sasParams.Add("spr", protocols)
|
||||||
|
}
|
||||||
|
if options.IP != "" {
|
||||||
|
sasParams.Add("sip", options.IP)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add override response hedaers
|
||||||
|
addQueryParameter(sasParams, "rscc", headers.CacheControl)
|
||||||
|
addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
|
||||||
|
addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
|
||||||
|
addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
|
||||||
|
addQueryParameter(sasParams, "rsct", headers.ContentType)
|
||||||
|
|
||||||
|
sasURL, err := url.Parse(uri)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
sasURL.RawQuery = sasParams.Encode()
|
||||||
|
return sasURL.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) {
|
||||||
|
rscc := headers.CacheControl
|
||||||
|
rscd := headers.ContentDisposition
|
||||||
|
rsce := headers.ContentEncoding
|
||||||
|
rscl := headers.ContentLanguage
|
||||||
|
rsct := headers.ContentType
|
||||||
|
|
||||||
|
if signedVersion >= "2015-02-21" {
|
||||||
|
canonicalizedResource = "/blob" + canonicalizedResource
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
|
||||||
|
if signedVersion >= "2015-04-05" {
|
||||||
|
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
||||||
|
if signedVersion >= "2013-08-15" {
|
||||||
|
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
|
||||||
|
}
|
114
vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
generated
vendored
114
vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
generated
vendored
|
@ -1,9 +1,26 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/xml"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
|
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
|
||||||
|
@@ -38,13 +55,28 @@ type ListContainersParameters struct {
}

// GetContainerReference returns a Container object for the specified container name.
-func (b BlobStorageClient) GetContainerReference(name string) Container {
-    return Container{
-        bsc:  &b,
+func (b *BlobStorageClient) GetContainerReference(name string) *Container {
+    return &Container{
+        bsc:  b,
        Name: name,
    }
}

+// GetContainerReferenceFromSASURI returns a Container object for the specified
+// container SASURI
+func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) {
+    path := strings.Split(sasuri.Path, "/")
+    if len(path) <= 1 {
+        return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String())
+    }
+
+    cli := newSASClient().GetBlobService()
+
+    return &Container{
+        bsc:    &cli,
+        Name:   path[1],
+        sasuri: sasuri,
+    }, nil
+}
+
// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
@@ -54,18 +86,53 @@ func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*Con
    uri := b.client.getEndpoint(blobServiceName, "", q)
    headers := b.client.getStandardHeaders()

-    var out ContainerListResponse
+    type ContainerAlias struct {
+        bsc        *BlobStorageClient
+        Name       string              `xml:"Name"`
+        Properties ContainerProperties `xml:"Properties"`
+        Metadata   BlobMetadata
+        sasuri     url.URL
+    }
+    type ContainerListResponseAlias struct {
+        XMLName    xml.Name         `xml:"EnumerationResults"`
+        Xmlns      string           `xml:"xmlns,attr"`
+        Prefix     string           `xml:"Prefix"`
+        Marker     string           `xml:"Marker"`
+        NextMarker string           `xml:"NextMarker"`
+        MaxResults int64            `xml:"MaxResults"`
+        Containers []ContainerAlias `xml:"Containers>Container"`
+    }
+
+    var outAlias ContainerListResponseAlias
    resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
    if err != nil {
        return nil, err
    }
-    defer resp.body.Close()
-    err = xmlUnmarshal(resp.body, &out)
-
-    // assign our client to the newly created Container objects
-    for i := range out.Containers {
-        out.Containers[i].bsc = &b
-    }
+    defer resp.Body.Close()
+    err = xmlUnmarshal(resp.Body, &outAlias)
+    if err != nil {
+        return nil, err
+    }
+
+    out := ContainerListResponse{
+        XMLName:    outAlias.XMLName,
+        Xmlns:      outAlias.Xmlns,
+        Prefix:     outAlias.Prefix,
+        Marker:     outAlias.Marker,
+        NextMarker: outAlias.NextMarker,
+        MaxResults: outAlias.MaxResults,
+        Containers: make([]Container, len(outAlias.Containers)),
+    }
+    for i, cnt := range outAlias.Containers {
+        out.Containers[i] = Container{
+            bsc:        &b,
+            Name:       cnt.Name,
+            Properties: cnt.Properties,
+            Metadata:   map[string]string(cnt.Metadata),
+            sasuri:     cnt.sasuri,
+        }
+    }
+
    return &out, err
}
@@ -82,11 +149,34 @@ func (p ListContainersParameters) getParameters() url.Values {
        out.Set("include", p.Include)
    }
    if p.MaxResults != 0 {
-        out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+        out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
    }
    if p.Timeout != 0 {
-        out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+        out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
    }

    return out
}

+func writeMetadata(h http.Header) map[string]string {
+    metadata := make(map[string]string)
+    for k, v := range h {
+        // Can't trust CanonicalHeaderKey() to munge case
+        // reliably. "_" is allowed in identifiers:
+        // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+        // https://msdn.microsoft.com/library/aa664670(VS.71).aspx
+        // http://tools.ietf.org/html/rfc7230#section-3.2
+        // ...but "_" is considered invalid by
+        // CanonicalMIMEHeaderKey in
+        // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
+        // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
+        k = strings.ToLower(k)
+        if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
+            continue
+        }
+        // metadata["lol"] = content of the last X-Ms-Meta-Lol header
+        k = k[len(userDefinedMetadataHeaderPrefix):]
+        metadata[k] = v[len(v)-1]
+    }
+    return metadata
+}
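writeMetadata keeps only the x-ms-meta-* headers and strips the prefix. A standalone sketch of that mapping; the loop is copied inline here because the helper itself is unexported, and the header values are invented examples.

package main

import (
    "fmt"
    "net/http"
    "strings"
)

const prefix = "x-ms-meta-" // mirrors userDefinedMetadataHeaderPrefix

func main() {
    h := http.Header{}
    h.Add("X-Ms-Meta-Owner", "registry")
    h.Add("Content-Type", "application/octet-stream")

    metadata := map[string]string{}
    for k, v := range h {
        k = strings.ToLower(k)
        if len(v) == 0 || !strings.HasPrefix(k, prefix) {
            continue
        }
        // keep the last value of each metadata header, keyed without the prefix
        metadata[k[len(prefix):]] = v[len(v)-1]
    }
    fmt.Println(metadata) // map[owner:registry]
}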
270 vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go (generated, vendored, new file)

@@ -0,0 +1,270 @@
package storage

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
    "bytes"
    "encoding/xml"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "time"
)

// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string

// Filters for listing blocks in block blobs
const (
    BlockListTypeAll         BlockListType = "all"
    BlockListTypeCommitted   BlockListType = "committed"
    BlockListTypeUncommitted BlockListType = "uncommitted"
)

// Maximum sizes (per REST API) for various concepts
const (
    MaxBlobBlockSize = 100 * 1024 * 1024
    MaxBlobPageSize  = 4 * 1024 * 1024
)

// BlockStatus defines states a block for a block blob can
// be in.
type BlockStatus string

// List of statuses that can be used to refer to a block in a block list
const (
    BlockStatusUncommitted BlockStatus = "Uncommitted"
    BlockStatusCommitted   BlockStatus = "Committed"
    BlockStatusLatest      BlockStatus = "Latest"
)

// Block is used to create Block entities for Put Block List
// call.
type Block struct {
    ID     string
    Status BlockStatus
}

// BlockListResponse contains the response fields from Get Block List call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct {
    XMLName           xml.Name        `xml:"BlockList"`
    CommittedBlocks   []BlockResponse `xml:"CommittedBlocks>Block"`
    UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
}

// BlockResponse contains the block information returned
// in the GetBlockListCall.
type BlockResponse struct {
    Name string `xml:"Name"`
    Size int64  `xml:"Size"`
}

// CreateBlockBlob initializes an empty block blob with no blocks.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
    return b.CreateBlockBlobFromReader(nil, options)
}

// CreateBlockBlobFromReader initializes a block blob using data from
// reader. Size must be the number of bytes read from reader. To
// create an empty blob, use size==0 and reader==nil.
//
// Any headers set in blob.Properties or metadata in blob.Metadata
// will be set on the blob.
//
// The API rejects requests with size > 256 MiB (but this limit is not
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
// PutBlock, and PutBlockList.
//
// To create a blob from scratch, call container.GetBlobReference() to
// get an empty blob, fill in blob.Properties and blob.Metadata as
// appropriate then call this method.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
    params := url.Values{}
    headers := b.Container.bsc.client.getStandardHeaders()
    headers["x-ms-blob-type"] = string(BlobTypeBlock)

    headers["Content-Length"] = "0"
    var n int64
    var err error
    if blob != nil {
        type lener interface {
            Len() int
        }
        // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
        if l, ok := blob.(lener); ok {
            n = int64(l.Len())
        } else {
            var buf bytes.Buffer
            n, err = io.Copy(&buf, blob)
            if err != nil {
                return err
            }
            blob = &buf
        }

        headers["Content-Length"] = strconv.FormatInt(n, 10)
    }
    b.Properties.ContentLength = n

    headers = mergeHeaders(headers, headersFromStruct(b.Properties))
    headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

    if options != nil {
        params = addTimeout(params, options.Timeout)
        headers = mergeHeaders(headers, headersFromStruct(*options))
    }
    uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

    resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
    if err != nil {
        return err
    }
    return b.respondCreation(resp, BlobTypeBlock)
}

// PutBlockOptions includes the options for a put block operation
type PutBlockOptions struct {
    Timeout    uint
    LeaseID    string `header:"x-ms-lease-id"`
    ContentMD5 string `header:"Content-MD5"`
    RequestID  string `header:"x-ms-client-request-id"`
}

// PutBlock saves the given data chunk to the specified block blob with
// given ID.
//
// The API rejects chunks larger than 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
    return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
}

// PutBlockWithLength saves the given data stream of exactly specified size to
// the block blob with given ID. It is an alternative to PutBlocks where data
// comes as stream but the length is known in advance.
//
// The API rejects requests with size > 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
    query := url.Values{
        "comp":    {"block"},
        "blockid": {blockID},
    }
    headers := b.Container.bsc.client.getStandardHeaders()
    headers["Content-Length"] = fmt.Sprintf("%v", size)

    if options != nil {
        query = addTimeout(query, options.Timeout)
        headers = mergeHeaders(headers, headersFromStruct(*options))
    }
    uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)

    resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
    if err != nil {
        return err
    }
    return b.respondCreation(resp, BlobTypeBlock)
}

// PutBlockListOptions includes the options for a put block list operation
type PutBlockListOptions struct {
    Timeout           uint
    LeaseID           string     `header:"x-ms-lease-id"`
    IfModifiedSince   *time.Time `header:"If-Modified-Since"`
    IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
    IfMatch           string     `header:"If-Match"`
    IfNoneMatch       string     `header:"If-None-Match"`
    RequestID         string     `header:"x-ms-client-request-id"`
}

// PutBlockList saves list of blocks to the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
    params := url.Values{"comp": {"blocklist"}}
    blockListXML := prepareBlockListRequest(blocks)
    headers := b.Container.bsc.client.getStandardHeaders()
    headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
    headers = mergeHeaders(headers, headersFromStruct(b.Properties))
    headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

    if options != nil {
        params = addTimeout(params, options.Timeout)
        headers = mergeHeaders(headers, headersFromStruct(*options))
    }
    uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

    resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth)
    if err != nil {
        return err
    }
    defer drainRespBody(resp)
    return checkRespCode(resp, []int{http.StatusCreated})
}

// GetBlockListOptions includes the options for a get block list operation
type GetBlockListOptions struct {
    Timeout   uint
    Snapshot  *time.Time
    LeaseID   string `header:"x-ms-lease-id"`
    RequestID string `header:"x-ms-client-request-id"`
}

// GetBlockList retrieves list of blocks in the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
    params := url.Values{
        "comp":          {"blocklist"},
        "blocklisttype": {string(blockType)},
    }
    headers := b.Container.bsc.client.getStandardHeaders()

    if options != nil {
        params = addTimeout(params, options.Timeout)
        params = addSnapshot(params, options.Snapshot)
        headers = mergeHeaders(headers, headersFromStruct(*options))
    }
    uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

    var out BlockListResponse
    resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
    if err != nil {
        return out, err
    }
    defer resp.Body.Close()

    err = xmlUnmarshal(resp.Body, &out)
    return out, err
}
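Taken together, PutBlock and PutBlockList give the chunked-upload path used for content larger than a single Put Blob. A minimal sketch under assumed placeholder account credentials and container/blob names; error handling is reduced to panics for brevity.

package main

import (
    "encoding/base64"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    // Placeholder credentials; the key must be base64, since NewBasicClient decodes it.
    client, err := storage.NewBasicClient("myaccount", "Zm9vYmFyYmF6cXV4Cg==")
    if err != nil {
        panic(err)
    }
    bs := client.GetBlobService()
    blob := bs.GetContainerReference("registry").GetBlobReference("data")

    payload := make([]byte, 10*1024*1024) // stand-in for a large upload
    const chunkSize = 4 * 1024 * 1024     // must stay <= MaxBlobBlockSize

    var blocks []storage.Block
    for i := 0; i*chunkSize < len(payload); i++ {
        start, end := i*chunkSize, (i+1)*chunkSize
        if end > len(payload) {
            end = len(payload)
        }
        // Block IDs must be base64 and of equal length within one blob.
        id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%05d", i)))
        if err := blob.PutBlock(id, payload[start:end], nil); err != nil {
            panic(err)
        }
        blocks = append(blocks, storage.Block{ID: id, Status: storage.BlockStatusUncommitted})
    }
    // Committing the list makes the uploaded blocks visible as one block blob.
    if err := blob.PutBlockList(blocks, nil); err != nil {
        panic(err)
    }
}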
728 vendor/github.com/Azure/azure-sdk-for-go/storage/client.go (generated, vendored)

@@ -1,8 +1,22 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage

+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
import (
-    "bytes"
+    "bufio"
    "encoding/base64"
    "encoding/json"
    "encoding/xml"
@@ -10,12 +24,18 @@ import (
    "fmt"
    "io"
    "io/ioutil"
+    "mime"
+    "mime/multipart"
    "net/http"
    "net/url"
+    "regexp"
    "runtime"
    "strconv"
    "strings"
+    "time"
+
+    "github.com/Azure/azure-sdk-for-go/version"
+    "github.com/Azure/go-autorest/autorest"
    "github.com/Azure/go-autorest/autorest/azure"
)

@@ -26,9 +46,11 @@ const (
    // DefaultAPIVersion is the Azure Storage API version string used when a
    // basic client is created.
-    DefaultAPIVersion = "2015-04-05"
+    DefaultAPIVersion = "2016-05-31"

    defaultUseHTTPS = true
+    defaultRetryAttempts = 5
+    defaultRetryDuration = time.Second * 5

    // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
    StorageEmulatorAccountName = "devstoreaccount1"
@@ -46,15 +68,79 @@ const (
    storageEmulatorQueue = "127.0.0.1:10001"

    userAgentHeader = "User-Agent"
+
+    userDefinedMetadataHeaderPrefix = "x-ms-meta-"
+
+    connectionStringAccountName      = "accountname"
+    connectionStringAccountKey       = "accountkey"
+    connectionStringEndpointSuffix   = "endpointsuffix"
+    connectionStringEndpointProtocol = "defaultendpointsprotocol"
+
+    connectionStringBlobEndpoint  = "blobendpoint"
+    connectionStringFileEndpoint  = "fileendpoint"
+    connectionStringQueueEndpoint = "queueendpoint"
+    connectionStringTableEndpoint = "tableendpoint"
+    connectionStringSAS           = "sharedaccesssignature"
)

+var (
+    validStorageAccount     = regexp.MustCompile("^[0-9a-z]{3,24}$")
+    defaultValidStatusCodes = []int{
+        http.StatusRequestTimeout,      // 408
+        http.StatusInternalServerError, // 500
+        http.StatusBadGateway,          // 502
+        http.StatusServiceUnavailable,  // 503
+        http.StatusGatewayTimeout,      // 504
+    }
+)
+
+// Sender sends a request
+type Sender interface {
+    Send(*Client, *http.Request) (*http.Response, error)
+}
+
+// DefaultSender is the default sender for the client. It implements
+// an automatic retry strategy.
+type DefaultSender struct {
+    RetryAttempts    int
+    RetryDuration    time.Duration
+    ValidStatusCodes []int
+    attempts         int // used for testing
+}
+
+// Send is the default retry strategy in the client
+func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
+    rr := autorest.NewRetriableRequest(req)
+    for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
+        err = rr.Prepare()
+        if err != nil {
+            return resp, err
+        }
+        resp, err = c.HTTPClient.Do(rr.Request())
+        if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
+            return resp, err
+        }
+        drainRespBody(resp)
+        autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
+        ds.attempts = attempts
+    }
+    ds.attempts++
+    return resp, err
+}
+
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
    // HTTPClient is the http.Client used to initiate API
-    // requests. If it is nil, http.DefaultClient is used.
+    // requests. http.DefaultClient is used when creating a
+    // client.
    HTTPClient *http.Client

+    // Sender is an interface that sends the request. Clients are
+    // created with a DefaultSender. The DefaultSender has an
+    // automatic retry strategy built in. The Sender can be customized.
+    Sender Sender
+
    accountName string
    accountKey  []byte
    useHTTPS    bool
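Because Sender is an exported field, callers can swap or tune the retry policy after constructing a client. A short sketch with placeholder credentials; the field values are illustrative, not recommendations.

package main

import (
    "net/http"
    "time"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    client, err := storage.NewBasicClient("myaccount", "Zm9vYmFyYmF6cXV4Cg==") // placeholders
    if err != nil {
        panic(err)
    }
    // Tighten the default retry policy: fewer attempts, shorter backoff,
    // and only retry on 503/504.
    client.Sender = &storage.DefaultSender{
        RetryAttempts:    3,
        RetryDuration:    2 * time.Second,
        ValidStatusCodes: []int{http.StatusServiceUnavailable, http.StatusGatewayTimeout},
    }
    _ = client.GetBlobService()
}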
@@ -62,17 +148,13 @@ type Client struct {
    baseURL    string
    apiVersion string
    userAgent  string
-}
-
-type storageResponse struct {
-    statusCode int
-    headers    http.Header
-    body       io.ReadCloser
+    sasClient       bool
+    accountSASToken url.Values
}

type odataResponse struct {
-    storageResponse
-    odata odataErrorMessage
+    resp  *http.Response
+    odata odataErrorWrapper
}

// AzureStorageServiceError contains fields of the error response from
@@ -85,22 +167,25 @@ type AzureStorageServiceError struct {
    QueryParameterName  string `xml:"QueryParameterName"`
    QueryParameterValue string `xml:"QueryParameterValue"`
    Reason              string `xml:"Reason"`
+    Lang                string
    StatusCode          int
    RequestID           string
+    Date                string
+    APIVersion          string
}

-type odataErrorMessageMessage struct {
+type odataErrorMessage struct {
    Lang  string `json:"lang"`
    Value string `json:"value"`
}

-type odataErrorMessageInternal struct {
+type odataError struct {
    Code    string `json:"code"`
-    Message odataErrorMessageMessage `json:"message"`
+    Message odataErrorMessage `json:"message"`
}

-type odataErrorMessage struct {
-    Err odataErrorMessageInternal `json:"odata.error"`
+type odataErrorWrapper struct {
+    Err odataError `json:"odata.error"`
}

// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
@@ -108,6 +193,7 @@ type odataErrorMessage struct {
type UnexpectedStatusCodeError struct {
    allowed []int
    got     int
+    inner   error
}

func (e UnexpectedStatusCodeError) Error() string {
@@ -118,7 +204,7 @@ func (e UnexpectedStatusCodeError) Error() string {
    for _, v := range e.allowed {
        expected = append(expected, s(v))
    }
-    return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
+    return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner)
}

// Got is the actual status code returned by Azure.
@@ -126,6 +212,60 @@ func (e UnexpectedStatusCodeError) Got() int {
    return e.got
}

+// Inner returns any inner error info.
+func (e UnexpectedStatusCodeError) Inner() error {
+    return e.inner
+}
+
+// NewClientFromConnectionString creates a Client from the connection string.
+func NewClientFromConnectionString(input string) (Client, error) {
+    // build a map of connection string key/value pairs
+    parts := map[string]string{}
+    for _, pair := range strings.Split(input, ";") {
+        if pair == "" {
+            continue
+        }
+
+        equalDex := strings.IndexByte(pair, '=')
+        if equalDex <= 0 {
+            return Client{}, fmt.Errorf("Invalid connection segment %q", pair)
+        }
+
+        value := strings.TrimSpace(pair[equalDex+1:])
+        key := strings.TrimSpace(strings.ToLower(pair[:equalDex]))
+        parts[key] = value
+    }
+
+    // TODO: validate parameter sets?
+
+    if parts[connectionStringAccountName] == StorageEmulatorAccountName {
+        return NewEmulatorClient()
+    }
+
+    if parts[connectionStringSAS] != "" {
+        endpoint := ""
+        if parts[connectionStringBlobEndpoint] != "" {
+            endpoint = parts[connectionStringBlobEndpoint]
+        } else if parts[connectionStringFileEndpoint] != "" {
+            endpoint = parts[connectionStringFileEndpoint]
+        } else if parts[connectionStringQueueEndpoint] != "" {
+            endpoint = parts[connectionStringQueueEndpoint]
+        } else {
+            endpoint = parts[connectionStringTableEndpoint]
+        }
+
+        return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS])
+    }
+
+    useHTTPS := defaultUseHTTPS
+    if parts[connectionStringEndpointProtocol] != "" {
+        useHTTPS = parts[connectionStringEndpointProtocol] == "https"
+    }
+
+    return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey],
+        parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS)
+}
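The parser above lower-cases the keys before lookup, so the usual Azure portal connection-string format maps directly onto the connectionString* constants. A sketch with placeholder values.

package main

import "github.com/Azure/azure-sdk-for-go/storage"

func main() {
    // Placeholder account name, key, and suffix.
    cs := "DefaultEndpointsProtocol=https;AccountName=myaccount;" +
        "AccountKey=Zm9vYmFyYmF6cXV4Cg==;EndpointSuffix=core.windows.net"

    client, err := storage.NewClientFromConnectionString(cs)
    if err != nil {
        panic(err)
    }
    _ = client.GetBlobService()
}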
// NewBasicClient constructs a Client with given storage service name and
// key.
func NewBasicClient(accountName, accountKey string) (Client, error) {
@@ -153,13 +293,13 @@ func NewEmulatorClient() (Client, error) {
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a custom
// storage endpoint than Azure Public Cloud.
-func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
+func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
    var c Client
-    if accountName == "" {
-        return c, fmt.Errorf("azure: account name required")
+    if !IsValidStorageAccount(accountName) {
+        return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
    } else if accountKey == "" {
        return c, fmt.Errorf("azure: account key required")
-    } else if blobServiceBaseURL == "" {
+    } else if serviceBaseURL == "" {
        return c, fmt.Errorf("azure: base storage service url required")
    }
@@ -169,23 +309,114 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u
    }

    c = Client{
+        HTTPClient:  http.DefaultClient,
        accountName: accountName,
        accountKey:  key,
        useHTTPS:    useHTTPS,
-        baseURL:     blobServiceBaseURL,
+        baseURL:     serviceBaseURL,
        apiVersion:  apiVersion,
+        sasClient:   false,
        UseSharedKeyLite: false,
+        Sender: &DefaultSender{
+            RetryAttempts:    defaultRetryAttempts,
+            ValidStatusCodes: defaultValidStatusCodes,
+            RetryDuration:    defaultRetryDuration,
+        },
    }
    c.userAgent = c.getDefaultUserAgent()
    return c, nil
}

+// IsValidStorageAccount checks if the storage account name is valid.
+// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
+func IsValidStorageAccount(account string) bool {
+    return validStorageAccount.MatchString(account)
+}
+
+// NewAccountSASClient constructs a client that uses accountSAS authorization
+// for its operations.
+func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
+    c := newSASClient()
+    c.accountSASToken = token
+    c.accountName = account
+    c.baseURL = env.StorageEndpointSuffix
+
+    // Get API version and protocol from token
+    c.apiVersion = token.Get("sv")
+    c.useHTTPS = token.Get("spr") == "https"
+    return c
+}
+
+// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
+// for its operations using the specified endpoint and SAS token.
+func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) {
+    u, err := url.Parse(endpoint)
+    if err != nil {
+        return Client{}, err
+    }
+
+    token, err := url.ParseQuery(sasToken)
+    if err != nil {
+        return Client{}, err
+    }
+
+    // the host name will look something like this
+    // - foo.blob.core.windows.net
+    // "foo" is the account name
+    // "core.windows.net" is the baseURL
+
+    // find the first dot to get account name
+    i1 := strings.IndexByte(u.Host, '.')
+    if i1 < 0 {
+        return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host)
+    }
+
+    // now find the second dot to get the base URL
+    i2 := strings.IndexByte(u.Host[i1+1:], '.')
+    if i2 < 0 {
+        return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:])
+    }
+
+    c := newSASClient()
+    c.accountSASToken = token
+    c.accountName = u.Host[:i1]
+    c.baseURL = u.Host[i1+i2+2:]
+
+    // Get API version and protocol from token
+    c.apiVersion = token.Get("sv")
+    c.useHTTPS = token.Get("spr") == "https"
+    return c, nil
+}
+
+func newSASClient() Client {
+    c := Client{
+        HTTPClient: http.DefaultClient,
+        apiVersion: DefaultAPIVersion,
+        sasClient:  true,
+        Sender: &DefaultSender{
+            RetryAttempts:    defaultRetryAttempts,
+            ValidStatusCodes: defaultValidStatusCodes,
+            RetryDuration:    defaultRetryDuration,
+        },
+    }
+    c.userAgent = c.getDefaultUserAgent()
+    return c
+}
+
+func (c Client) isServiceSASClient() bool {
+    return c.sasClient && c.accountSASToken == nil
+}
+
+func (c Client) isAccountSASClient() bool {
+    return c.sasClient && c.accountSASToken != nil
+}
+
func (c Client) getDefaultUserAgent() string {
-    return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s",
+    return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
        runtime.Version(),
        runtime.GOARCH,
        runtime.GOOS,
-        sdkVersion,
+        version.Number,
        c.apiVersion,
    )
}
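A rough sketch of constructing an account-SAS client from an endpoint plus token, showing how the host is split by the constructor above; the endpoint and token are placeholders, not a valid signature.

package main

import "github.com/Azure/azure-sdk-for-go/storage"

func main() {
    endpoint := "https://myaccount.blob.core.windows.net/"
    // Truncated placeholder token.
    token := "sv=2016-05-31&ss=b&srt=co&sp=rl&se=2018-01-01T00:00:00Z&spr=https&sig=placeholder"

    client, err := storage.NewAccountSASClientFromEndpointToken(endpoint, token)
    if err != nil {
        panic(err)
    }
    // "myaccount" becomes the account name and "core.windows.net" the base URL;
    // sv and spr from the token set the API version and the HTTPS flag.
    _ = client.GetBlobService()
}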
@@ -210,7 +441,7 @@ func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]str
    return extraheaders
}

-func (c Client) getBaseURL(service string) string {
+func (c Client) getBaseURL(service string) *url.URL {
    scheme := "http"
    if c.useHTTPS {
        scheme = "https"
@@ -229,18 +460,14 @@ func (c Client) getBaseURL(service string) string {
        host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
    }

-    u := &url.URL{
+    return &url.URL{
        Scheme: scheme,
-        Host:   host}
-    return u.String()
+        Host:   host,
+    }
}

func (c Client) getEndpoint(service, path string, params url.Values) string {
-    u, err := url.Parse(c.getBaseURL(service))
-    if err != nil {
-        // really should not be happening
-        panic(err)
-    }
+    u := c.getBaseURL(service)

    // API doesn't accept path segments not starting with '/'
    if !strings.HasPrefix(path, "/") {
@@ -256,6 +483,160 @@ func (c Client) getEndpoint(service, path string, params url.Values) string {
    return u.String()
}
+// AccountSASTokenOptions includes options for constructing
+// an account SAS token.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+type AccountSASTokenOptions struct {
+    APIVersion    string
+    Services      Services
+    ResourceTypes ResourceTypes
+    Permissions   Permissions
+    Start         time.Time
+    Expiry        time.Time
+    IP            string
+    UseHTTPS      bool
+}
+
+// Services specify services accessible with an account SAS.
+type Services struct {
+    Blob  bool
+    Queue bool
+    Table bool
+    File  bool
+}
+
+// ResourceTypes specify the resources accessible with an
+// account SAS.
+type ResourceTypes struct {
+    Service   bool
+    Container bool
+    Object    bool
+}
+
+// Permissions specifies permissions for an accountSAS.
+type Permissions struct {
+    Read    bool
+    Write   bool
+    Delete  bool
+    List    bool
+    Add     bool
+    Create  bool
+    Update  bool
+    Process bool
+}
+
+// GetAccountSASToken creates an account SAS token
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
+    if options.APIVersion == "" {
+        options.APIVersion = c.apiVersion
+    }
+
+    if options.APIVersion < "2015-04-05" {
+        return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion)
+    }
+
+    // build services string
+    services := ""
+    if options.Services.Blob {
+        services += "b"
+    }
+    if options.Services.Queue {
+        services += "q"
+    }
+    if options.Services.Table {
+        services += "t"
+    }
+    if options.Services.File {
+        services += "f"
+    }
+
+    // build resources string
+    resources := ""
+    if options.ResourceTypes.Service {
+        resources += "s"
+    }
+    if options.ResourceTypes.Container {
+        resources += "c"
+    }
+    if options.ResourceTypes.Object {
+        resources += "o"
+    }
+
+    // build permissions string
+    permissions := ""
+    if options.Permissions.Read {
+        permissions += "r"
+    }
+    if options.Permissions.Write {
+        permissions += "w"
+    }
+    if options.Permissions.Delete {
+        permissions += "d"
+    }
+    if options.Permissions.List {
+        permissions += "l"
+    }
+    if options.Permissions.Add {
+        permissions += "a"
+    }
+    if options.Permissions.Create {
+        permissions += "c"
+    }
+    if options.Permissions.Update {
+        permissions += "u"
+    }
+    if options.Permissions.Process {
+        permissions += "p"
+    }
+
+    // build start time, if exists
+    start := ""
+    if options.Start != (time.Time{}) {
+        start = options.Start.UTC().Format(time.RFC3339)
+    }
+
+    // build expiry time
+    expiry := options.Expiry.UTC().Format(time.RFC3339)
+
+    protocol := "https,http"
+    if options.UseHTTPS {
+        protocol = "https"
+    }
+
+    stringToSign := strings.Join([]string{
+        c.accountName,
+        permissions,
+        services,
+        resources,
+        start,
+        expiry,
+        options.IP,
+        protocol,
+        options.APIVersion,
+        "",
+    }, "\n")
+    signature := c.computeHmac256(stringToSign)
+
+    sasParams := url.Values{
+        "sv":  {options.APIVersion},
+        "ss":  {services},
+        "srt": {resources},
+        "sp":  {permissions},
+        "se":  {expiry},
+        "spr": {protocol},
+        "sig": {signature},
+    }
+    if start != "" {
+        sasParams.Add("st", start)
+    }
+    if options.IP != "" {
+        sasParams.Add("sip", options.IP)
+    }
+
+    return sasParams, nil
+}
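A sketch of generating an account SAS for read/list access on blob containers and objects, using the options exactly as defined above; the credentials are placeholders, so the resulting signature is only illustrative.

package main

import (
    "fmt"
    "time"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    client, err := storage.NewBasicClient("myaccount", "Zm9vYmFyYmF6cXV4Cg==") // placeholders
    if err != nil {
        panic(err)
    }
    token, err := client.GetAccountSASToken(storage.AccountSASTokenOptions{
        Services:      storage.Services{Blob: true},
        ResourceTypes: storage.ResourceTypes{Container: true, Object: true},
        Permissions:   storage.Permissions{Read: true, List: true},
        Expiry:        time.Now().Add(48 * time.Hour),
        UseHTTPS:      true,
    })
    if err != nil {
        panic(err)
    }
    // Encodes as sv=...&ss=b&srt=co&sp=rl&se=...&spr=https&sig=...
    fmt.Println(token.Encode())
}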
// GetBlobService returns a BlobStorageClient which can operate on the blob
// service of the storage account.
func (c Client) GetBlobService() BlobStorageClient {
@@ -320,7 +701,7 @@ func (c Client) getStandardHeaders() map[string]string {
    }
}

-func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) {
+func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) {
    headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
    if err != nil {
        return nil, err
@@ -331,66 +712,43 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
        return nil, errors.New("azure/storage: error creating request: " + err.Error())
    }

-    if clstr, ok := headers["Content-Length"]; ok {
-        // content length header is being signed, but completely ignored by golang.
-        // instead we have to use the ContentLength property on the request struct
-        // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and
-        // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49)
-        req.ContentLength, err = strconv.ParseInt(clstr, 10, 64)
-        if err != nil {
-            return nil, err
-        }
-    }
+    // http.NewRequest() will automatically set req.ContentLength for a handful of types
+    // otherwise we will handle here.
+    if req.ContentLength < 1 {
+        if clstr, ok := headers["Content-Length"]; ok {
+            if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil {
+                req.ContentLength = cl
+            }
+        }
+    }

    for k, v := range headers {
-        req.Header.Add(k, v)
+        req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
    }

-    httpClient := c.HTTPClient
-    if httpClient == nil {
-        httpClient = http.DefaultClient
-    }
-    resp, err := httpClient.Do(req)
+    if c.isAccountSASClient() {
+        // append the SAS token to the query params
+        v := req.URL.Query()
+        v = mergeParams(v, c.accountSASToken)
+        req.URL.RawQuery = v.Encode()
+    }
+
+    resp, err := c.Sender.Send(&c, req)
    if err != nil {
        return nil, err
    }

-    statusCode := resp.StatusCode
-    if statusCode >= 400 && statusCode <= 505 {
-        var respBody []byte
-        respBody, err = readAndCloseBody(resp.Body)
-        if err != nil {
-            return nil, err
-        }
-
-        requestID := resp.Header.Get("x-ms-request-id")
-        if len(respBody) == 0 {
-            // no error in response body, might happen in HEAD requests
-            err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID)
-        } else {
-            // response contains storage service error object, unmarshal
-            storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID)
-            if err != nil { // error unmarshaling the error response
-                err = errIn
-            }
-            err = storageErr
-        }
-        return &storageResponse{
-            statusCode: resp.StatusCode,
-            headers:    resp.Header,
-            body:       ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
-        }, err
+    if resp.StatusCode >= 400 && resp.StatusCode <= 505 {
+        return resp, getErrorFromResponse(resp)
    }

-    return &storageResponse{
-        statusCode: resp.StatusCode,
-        headers:    resp.Header,
-        body:       resp.Body}, nil
+    return resp, nil
}

-func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
+func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) {
    headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
    if err != nil {
-        return nil, err
+        return nil, nil, nil, err
    }

    req, err := http.NewRequest(verb, url, body)
@@ -398,42 +756,122 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo
        req.Header.Add(k, v)
    }

-    httpClient := c.HTTPClient
-    if httpClient == nil {
-        httpClient = http.DefaultClient
-    }
-
-    resp, err := httpClient.Do(req)
+    resp, err := c.Sender.Send(&c, req)
    if err != nil {
-        return nil, err
+        return nil, nil, nil, err
    }

-    respToRet := &odataResponse{}
-    respToRet.body = resp.Body
-    respToRet.statusCode = resp.StatusCode
-    respToRet.headers = resp.Header
+    respToRet := &odataResponse{resp: resp}

    statusCode := resp.StatusCode
    if statusCode >= 400 && statusCode <= 505 {
        var respBody []byte
        respBody, err = readAndCloseBody(resp.Body)
        if err != nil {
-            return nil, err
+            return nil, nil, nil, err
        }

+        requestID, date, version := getDebugHeaders(resp.Header)
        if len(respBody) == 0 {
            // no error in response body, might happen in HEAD requests
-            err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id"))
-            return respToRet, err
+            err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
+            return respToRet, req, resp, err
        }
        // try unmarshal as odata.error json
        err = json.Unmarshal(respBody, &respToRet.odata)
-        return respToRet, err
+        return respToRet, req, resp, err
    }

+    return respToRet, req, resp, err
+}
+
+func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
+    respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
+    return respToRet, err
+}
+
+func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
+    // execute common query, get back generated request, response etc... for more processing.
+    respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
+    if err != nil {
+        return nil, err
+    }
+
+    // return the OData in the case of executing batch commands.
+    // In this case we need to read the outer batch boundary and contents.
+    // Then we read the changeset information within the batch
+    var respBody []byte
+    respBody, err = readAndCloseBody(resp.Body)
+    if err != nil {
+        return nil, err
+    }
+
+    // outer multipart body
+    _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
+    if err != nil {
+        return nil, err
+    }
+
+    // batch details.
+    batchBoundary := batchHeader["boundary"]
+    batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
+    if err != nil {
+        return nil, err
+    }
+
+    // changeset details.
+    err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
+    if err != nil {
+        return nil, err
    }

    return respToRet, nil
}
+
+func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
+    changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
+    changesetPart, err := changesetMultiReader.NextPart()
+    if err != nil {
+        return err
+    }
+
+    changesetPartBufioReader := bufio.NewReader(changesetPart)
+    changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
+    if err != nil {
+        return err
+    }
+
+    if changesetResp.StatusCode != http.StatusNoContent {
+        changesetBody, err := readAndCloseBody(changesetResp.Body)
+        err = json.Unmarshal(changesetBody, &respToRet.odata)
+        if err != nil {
+            return err
+        }
+        respToRet.resp = changesetResp
+    }
+
+    return nil
+}
+
+func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
+    respBodyString := string(respBody)
+    respBodyReader := strings.NewReader(respBodyString)
+
+    // reading batchresponse
+    batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
+    batchPart, err := batchMultiReader.NextPart()
+    if err != nil {
+        return nil, "", err
+    }
+    batchPartBufioReader := bufio.NewReader(batchPart)
+
+    _, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
+    if err != nil {
+        return nil, "", err
+    }
+    changesetBoundary := changesetHeader["boundary"]
+    return batchPartBufioReader, changesetBoundary, nil
+}
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
|
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
|
||||||
defer body.Close()
|
defer body.Close()
|
||||||
out, err := ioutil.ReadAll(body)
|
out, err := ioutil.ReadAll(body)
|
||||||
|
@ -443,37 +881,109 @@ func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
|
||||||
return out, err
|
return out, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
|
// reads the response body then closes it
|
||||||
var storageErr AzureStorageServiceError
|
func drainRespBody(resp *http.Response) {
|
||||||
if err := xml.Unmarshal(body, &storageErr); err != nil {
|
io.Copy(ioutil.Discard, resp.Body)
|
||||||
return storageErr, err
|
resp.Body.Close()
|
||||||
}
|
|
||||||
storageErr.StatusCode = statusCode
|
|
||||||
storageErr.RequestID = requestID
|
|
||||||
return storageErr, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError {
|
func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
|
||||||
|
if err := xml.Unmarshal(body, storageErr); err != nil {
|
||||||
|
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
|
||||||
|
odataError := odataErrorWrapper{}
|
||||||
|
if err := json.Unmarshal(body, &odataError); err != nil {
|
||||||
|
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
storageErr.Code = odataError.Err.Code
|
||||||
|
storageErr.Message = odataError.Err.Message.Value
|
||||||
|
storageErr.Lang = odataError.Err.Message.Lang
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
|
||||||
return AzureStorageServiceError{
|
return AzureStorageServiceError{
|
||||||
StatusCode: code,
|
StatusCode: code,
|
||||||
Code: status,
|
Code: status,
|
||||||
RequestID: requestID,
|
RequestID: requestID,
|
||||||
|
Date: date,
|
||||||
|
APIVersion: version,
|
||||||
Message: "no response body was available for error status code",
|
Message: "no response body was available for error status code",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e AzureStorageServiceError) Error() string {
|
func (e AzureStorageServiceError) Error() string {
|
||||||
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s",
|
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s",
|
||||||
e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue)
|
e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkRespCode returns UnexpectedStatusError if the given response code is not
|
// checkRespCode returns UnexpectedStatusError if the given response code is not
|
||||||
// one of the allowed status codes; otherwise nil.
|
// one of the allowed status codes; otherwise nil.
|
||||||
func checkRespCode(respCode int, allowed []int) error {
|
func checkRespCode(resp *http.Response, allowed []int) error {
|
||||||
for _, v := range allowed {
|
for _, v := range allowed {
|
||||||
if respCode == v {
|
if resp.StatusCode == v {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return UnexpectedStatusCodeError{allowed, respCode}
|
err := getErrorFromResponse(resp)
|
||||||
|
return UnexpectedStatusCodeError{
|
||||||
|
allowed: allowed,
|
||||||
|
got: resp.StatusCode,
|
||||||
|
inner: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
|
||||||
|
metadata = c.protectUserAgent(metadata)
|
||||||
|
for k, v := range metadata {
|
||||||
|
h[userDefinedMetadataHeaderPrefix+k] = v
|
||||||
|
}
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDebugHeaders(h http.Header) (requestID, date, version string) {
|
||||||
|
requestID = h.Get("x-ms-request-id")
|
||||||
|
version = h.Get("x-ms-version")
|
||||||
|
date = h.Get("Date")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getErrorFromResponse(resp *http.Response) error {
|
||||||
|
respBody, err := readAndCloseBody(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
requestID, date, version := getDebugHeaders(resp.Header)
|
||||||
|
if len(respBody) == 0 {
|
||||||
|
// no error in response body, might happen in HEAD requests
|
||||||
|
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
|
||||||
|
} else {
|
||||||
|
storageErr := AzureStorageServiceError{
|
||||||
|
StatusCode: resp.StatusCode,
|
||||||
|
RequestID: requestID,
|
||||||
|
Date: date,
|
||||||
|
APIVersion: version,
|
||||||
|
}
|
||||||
|
// response contains storage service error object, unmarshal
|
||||||
|
if resp.Header.Get("Content-Type") == "application/xml" {
|
||||||
|
errIn := serviceErrFromXML(respBody, &storageErr)
|
||||||
|
if err != nil { // error unmarshaling the error response
|
||||||
|
err = errIn
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
errIn := serviceErrFromJSON(respBody, &storageErr)
|
||||||
|
if err != nil { // error unmarshaling the error response
|
||||||
|
err = errIn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = storageErr
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
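For reference, a short sketch of what the enriched error now carries (not part of the diff; the literal values below are illustrative):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// The fields mirror what getErrorFromResponse now fills in from the
// response headers (request ID, Date, x-ms-version).
func main() {
	svcErr := storage.AzureStorageServiceError{
		StatusCode: 404,
		Code:       "BlobNotFound",
		Message:    "The specified blob does not exist.",
		RequestID:  "00000000-0000-0000-0000-000000000000",
		Date:       "Mon, 02 Jan 2006 15:04:05 GMT",
		APIVersion: "2016-05-31",
	}
	// Error() now also reports the request date and API version.
	fmt.Println(svcErr.Error())
}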
38  vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go  generated  vendored  Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SASOptions includes options used by SAS URIs for different
|
||||||
|
// services and resources.
|
||||||
|
type SASOptions struct {
|
||||||
|
APIVersion string
|
||||||
|
Start time.Time
|
||||||
|
Expiry time.Time
|
||||||
|
IP string
|
||||||
|
UseHTTPS bool
|
||||||
|
Identifier string
|
||||||
|
}
|
||||||
|
|
||||||
|
func addQueryParameter(query url.Values, key, value string) url.Values {
|
||||||
|
if value != "" {
|
||||||
|
query.Add(key, value)
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
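addQueryParameter only appends non-empty values, keeping optional SAS fields out of the query string. A minimal in-package sketch, with illustrative parameter names:

package storage

import (
	"net/url"
	"testing"
)

// In-package, since addQueryParameter is unexported; the query keys are
// illustrative SAS parameter names.
func TestAddQueryParameterSkipsEmpty(t *testing.T) {
	q := url.Values{}
	q = addQueryParameter(q, "si", "read-only-policy") // added
	q = addQueryParameter(q, "sip", "")                // skipped: empty value
	if got := q.Encode(); got != "si=read-only-policy" {
		t.Fatalf("unexpected query: %s", got)
	}
}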
426  vendor/github.com/Azure/azure-sdk-for-go/storage/container.go  generated  vendored
|
@ -1,13 +1,27 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -16,20 +30,76 @@ type Container struct {
|
||||||
bsc *BlobStorageClient
|
bsc *BlobStorageClient
|
||||||
Name string `xml:"Name"`
|
Name string `xml:"Name"`
|
||||||
Properties ContainerProperties `xml:"Properties"`
|
Properties ContainerProperties `xml:"Properties"`
|
||||||
|
Metadata map[string]string
|
||||||
|
sasuri url.URL
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client returns the HTTP client used by the Container reference.
|
||||||
|
func (c *Container) Client() *Client {
|
||||||
|
return &c.bsc.client
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Container) buildPath() string {
|
func (c *Container) buildPath() string {
|
||||||
return fmt.Sprintf("/%s", c.Name)
|
return fmt.Sprintf("/%s", c.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetURL gets the canonical URL to the container.
|
||||||
|
// This method does not create a publicly accessible URL if the container
|
||||||
|
// is private and this method does not check if the blob exists.
|
||||||
|
func (c *Container) GetURL() string {
|
||||||
|
container := c.Name
|
||||||
|
if container == "" {
|
||||||
|
container = "$root"
|
||||||
|
}
|
||||||
|
return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainerSASOptions are options to construct a container SAS
|
||||||
|
// URI.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||||
|
type ContainerSASOptions struct {
|
||||||
|
ContainerSASPermissions
|
||||||
|
OverrideHeaders
|
||||||
|
SASOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainerSASPermissions includes the available permissions for
|
||||||
|
// a container SAS URI.
|
||||||
|
type ContainerSASPermissions struct {
|
||||||
|
BlobServiceSASPermissions
|
||||||
|
List bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSASURI creates an URL to the container which contains the Shared
|
||||||
|
// Access Signature with the specified options.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||||
|
func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) {
|
||||||
|
uri := c.GetURL()
|
||||||
|
signedResource := "c"
|
||||||
|
canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// build permissions string
|
||||||
|
permissions := options.BlobServiceSASPermissions.buildString()
|
||||||
|
if options.List {
|
||||||
|
permissions += "l"
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
|
||||||
|
}
|
||||||
|
|
||||||
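A hedged usage sketch of the new container SAS helper follows (not part of the diff); only fields declared in this hunk are set, and the read/write flags on the embedded BlobServiceSASPermissions are left untouched:

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// containerListSAS builds a list-only SAS URL valid for four hours.
func containerListSAS(cnt *storage.Container) (string, error) {
	opts := storage.ContainerSASOptions{}
	opts.List = true                            // from ContainerSASPermissions
	opts.Start = time.Now()                     // from the embedded SASOptions
	opts.Expiry = time.Now().Add(4 * time.Hour)
	opts.UseHTTPS = true
	uri, err := cnt.GetSASURI(opts)
	if err != nil {
		return "", err
	}
	fmt.Println("shareable container URL:", uri)
	return uri, nil
}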
// ContainerProperties contains various properties of a container returned from
|
// ContainerProperties contains various properties of a container returned from
|
||||||
// various endpoints like ListContainers.
|
// various endpoints like ListContainers.
|
||||||
type ContainerProperties struct {
|
type ContainerProperties struct {
|
||||||
LastModified string `xml:"Last-Modified"`
|
LastModified string `xml:"Last-Modified"`
|
||||||
Etag string `xml:"Etag"`
|
Etag string `xml:"Etag"`
|
||||||
LeaseStatus string `xml:"LeaseStatus"`
|
LeaseStatus string `xml:"LeaseStatus"`
|
||||||
LeaseState string `xml:"LeaseState"`
|
LeaseState string `xml:"LeaseState"`
|
||||||
LeaseDuration string `xml:"LeaseDuration"`
|
LeaseDuration string `xml:"LeaseDuration"`
|
||||||
|
PublicAccess ContainerAccessType `xml:"PublicAccess"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContainerListResponse contains the response fields from
|
// ContainerListResponse contains the response fields from
|
||||||
|
@ -69,6 +139,14 @@ type BlobListResponse struct {
|
||||||
Delimiter string `xml:"Delimiter"`
|
Delimiter string `xml:"Delimiter"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IncludeBlobDataset has options to include in a list blobs operation
|
||||||
|
type IncludeBlobDataset struct {
|
||||||
|
Snapshots bool
|
||||||
|
Metadata bool
|
||||||
|
UncommittedBlobs bool
|
||||||
|
Copy bool
|
||||||
|
}
|
||||||
|
|
||||||
// ListBlobsParameters defines the set of customizable
|
// ListBlobsParameters defines the set of customizable
|
||||||
// parameters to make a List Blobs call.
|
// parameters to make a List Blobs call.
|
||||||
//
|
//
|
||||||
|
@ -77,9 +155,10 @@ type ListBlobsParameters struct {
|
||||||
Prefix string
|
Prefix string
|
||||||
Delimiter string
|
Delimiter string
|
||||||
Marker string
|
Marker string
|
||||||
Include string
|
Include *IncludeBlobDataset
|
||||||
MaxResults uint
|
MaxResults uint
|
||||||
Timeout uint
|
Timeout uint
|
||||||
|
RequestID string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p ListBlobsParameters) getParameters() url.Values {
|
func (p ListBlobsParameters) getParameters() url.Values {
|
||||||
|
@ -94,19 +173,32 @@ func (p ListBlobsParameters) getParameters() url.Values {
|
||||||
if p.Marker != "" {
|
if p.Marker != "" {
|
||||||
out.Set("marker", p.Marker)
|
out.Set("marker", p.Marker)
|
||||||
}
|
}
|
||||||
if p.Include != "" {
|
if p.Include != nil {
|
||||||
out.Set("include", p.Include)
|
include := []string{}
|
||||||
|
include = addString(include, p.Include.Snapshots, "snapshots")
|
||||||
|
include = addString(include, p.Include.Metadata, "metadata")
|
||||||
|
include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
|
||||||
|
include = addString(include, p.Include.Copy, "copy")
|
||||||
|
fullInclude := strings.Join(include, ",")
|
||||||
|
out.Set("include", fullInclude)
|
||||||
}
|
}
|
||||||
if p.MaxResults != 0 {
|
if p.MaxResults != 0 {
|
||||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
||||||
}
|
}
|
||||||
if p.Timeout != 0 {
|
if p.Timeout != 0 {
|
||||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
||||||
}
|
}
|
||||||
|
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func addString(datasets []string, include bool, text string) []string {
|
||||||
|
if include {
|
||||||
|
datasets = append(datasets, text)
|
||||||
|
}
|
||||||
|
return datasets
|
||||||
|
}
|
||||||
|
|
||||||
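Listing blobs with the reworked Include parameter might look like this sketch (not part of the diff; the prefix and page size are illustrative):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// The string Include field is replaced by *IncludeBlobDataset; the selected
// datasets are joined into include=snapshots,metadata,... by getParameters().
func listWithSnapshots(cnt *storage.Container) error {
	params := storage.ListBlobsParameters{
		Prefix:     "docker/registry/v2/",
		MaxResults: 100,
		Include: &storage.IncludeBlobDataset{
			Snapshots: true,
			Metadata:  true,
		},
	}
	resp, err := cnt.ListBlobs(params)
	if err != nil {
		return err
	}
	for _, b := range resp.Blobs {
		// Each returned Blob now carries a back-reference to its Container.
		fmt.Println(b.Name, b.Container.Name)
	}
	return nil
}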
// ContainerAccessType defines the access level to the container from a public
|
// ContainerAccessType defines the access level to the container from a public
|
||||||
// request.
|
// request.
|
||||||
//
|
//
|
||||||
|
@ -142,123 +234,160 @@ const (
|
||||||
ContainerAccessHeader string = "x-ms-blob-public-access"
|
ContainerAccessHeader string = "x-ms-blob-public-access"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// GetBlobReference returns a Blob object for the specified blob name.
|
||||||
|
func (c *Container) GetBlobReference(name string) *Blob {
|
||||||
|
return &Blob{
|
||||||
|
Container: c,
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateContainerOptions includes the options for a create container operation
|
||||||
|
type CreateContainerOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
Access ContainerAccessType `header:"x-ms-blob-public-access"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
// Create creates a blob container within the storage account
|
// Create creates a blob container within the storage account
|
||||||
// with given name and access level. Returns error if container already exists.
|
// with given name and access level. Returns error if container already exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
|
||||||
func (c *Container) Create() error {
|
func (c *Container) Create(options *CreateContainerOptions) error {
|
||||||
resp, err := c.create()
|
resp, err := c.create(options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
return checkRespCode(resp, []int{http.StatusCreated})
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateIfNotExists creates a blob container if it does not exist. Returns
|
// CreateIfNotExists creates a blob container if it does not exist. Returns
|
||||||
// true if container is newly created or false if container already exists.
|
// true if container is newly created or false if container already exists.
|
||||||
func (c *Container) CreateIfNotExists() (bool, error) {
|
func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
|
||||||
resp, err := c.create()
|
resp, err := c.create(options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
|
||||||
return resp.statusCode == http.StatusCreated, nil
|
return resp.StatusCode == http.StatusCreated, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Container) create() (*storageResponse, error) {
|
func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) {
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
query := url.Values{"restype": {"container"}}
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
|
||||||
|
|
||||||
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
||||||
}
|
}
|
||||||
|
|
||||||
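A possible caller of the new create options (not part of the diff; the access constant is declared elsewhere in the package and the request ID is made up):

package main

import "github.com/Azure/azure-sdk-for-go/storage"

// ensureContainer creates the container if needed, with blob-level public
// access and a per-request timeout.
func ensureContainer(cnt *storage.Container) (bool, error) {
	opts := &storage.CreateContainerOptions{
		Timeout:   30, // forwarded as the timeout query parameter
		Access:    storage.ContainerAccessTypeBlob,
		RequestID: "registry-bootstrap",
	}
	return cnt.CreateIfNotExists(opts)
}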
// Exists returns true if a container with given name exists
|
// Exists returns true if a container with given name exists
|
||||||
// on the storage account, otherwise returns false.
|
// on the storage account, otherwise returns false.
|
||||||
func (c *Container) Exists() (bool, error) {
|
func (c *Container) Exists() (bool, error) {
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
q := url.Values{"restype": {"container"}}
|
||||||
|
var uri string
|
||||||
|
if c.bsc.client.isServiceSASClient() {
|
||||||
|
q = mergeParams(q, c.sasuri.Query())
|
||||||
|
newURI := c.sasuri
|
||||||
|
newURI.RawQuery = q.Encode()
|
||||||
|
uri = newURI.String()
|
||||||
|
|
||||||
|
} else {
|
||||||
|
uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
|
||||||
|
}
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
|
resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
|
||||||
return resp.statusCode == http.StatusOK, nil
|
return resp.StatusCode == http.StatusOK, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx
|
// SetContainerPermissionOptions includes options for a set container permissions operation
|
||||||
func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error {
|
type SetContainerPermissionOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPermissions sets up container permissions
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
|
||||||
|
func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
|
||||||
|
body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
params := url.Values{
|
params := url.Values{
|
||||||
"restype": {"container"},
|
"restype": {"container"},
|
||||||
"comp": {"acl"},
|
"comp": {"acl"},
|
||||||
}
|
}
|
||||||
|
|
||||||
if timeout > 0 {
|
|
||||||
params.Add("timeout", strconv.Itoa(timeout))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
if permissions.AccessType != "" {
|
headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
|
||||||
headers[ContainerAccessHeader] = string(permissions.AccessType)
|
|
||||||
}
|
|
||||||
|
|
||||||
if leaseID != "" {
|
|
||||||
headers[headerLeaseID] = leaseID
|
|
||||||
}
|
|
||||||
|
|
||||||
body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
|
|
||||||
headers["Content-Length"] = strconv.Itoa(length)
|
headers["Content-Length"] = strconv.Itoa(length)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||||
|
|
||||||
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
|
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusOK})
|
||||||
|
}
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
// GetContainerPermissionOptions includes options for a get container permissions operation
|
||||||
return errors.New("Unable to set permissions")
|
type GetContainerPermissionOptions struct {
|
||||||
}
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
return nil
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
|
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
|
||||||
// If timeout is 0 then it will not be passed to Azure
|
// If timeout is 0 then it will not be passed to Azure
|
||||||
// leaseID will only be passed to Azure if populated
|
// leaseID will only be passed to Azure if populated
|
||||||
func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) {
|
func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
|
||||||
params := url.Values{
|
params := url.Values{
|
||||||
"restype": {"container"},
|
"restype": {"container"},
|
||||||
"comp": {"acl"},
|
"comp": {"acl"},
|
||||||
}
|
}
|
||||||
|
|
||||||
if timeout > 0 {
|
|
||||||
params.Add("timeout", strconv.Itoa(timeout))
|
|
||||||
}
|
|
||||||
|
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
if leaseID != "" {
|
if options != nil {
|
||||||
headers[headerLeaseID] = leaseID
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
}
|
}
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||||
|
|
||||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
var ap AccessPolicy
|
var ap AccessPolicy
|
||||||
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
|
err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return buildAccessPolicy(ap, &resp.headers), nil
|
return buildAccessPolicy(ap, &resp.Header), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
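The options-based permission round trip that replaces the old (timeout, leaseID) arguments could be used like this sketch (not part of the diff; the private-access constant lives elsewhere in the package and the lease ID is only needed if the container holds a lease):

package main

import "github.com/Azure/azure-sdk-for-go/storage"

// tightenACL reads the current ACL, switches the container to private
// access, and writes the policies back.
func tightenACL(cnt *storage.Container, leaseID string) error {
	perms, err := cnt.GetPermissions(&storage.GetContainerPermissionOptions{Timeout: 30})
	if err != nil {
		return err
	}
	perms.AccessType = storage.ContainerAccessTypePrivate
	setOpts := &storage.SetContainerPermissionOptions{
		Timeout: 30,
		LeaseID: leaseID,
	}
	return cnt.SetPermissions(*perms, setOpts)
}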
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
|
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
|
||||||
|
@ -284,17 +413,26 @@ func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissi
|
||||||
return &permissions
|
return &permissions
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeleteContainerOptions includes options for a delete container operation
|
||||||
|
type DeleteContainerOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
// Delete deletes the container with given name on the storage
|
// Delete deletes the container with given name on the storage
|
||||||
// account. If the container does not exist returns error.
|
// account. If the container does not exist returns error.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
|
||||||
func (c *Container) Delete() error {
|
func (c *Container) Delete(options *DeleteContainerOptions) error {
|
||||||
resp, err := c.delete()
|
resp, err := c.delete(options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
return checkRespCode(resp, []int{http.StatusAccepted})
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteIfExists deletes the container with given name on the storage
|
// DeleteIfExists deletes the container with given name on the storage
|
||||||
|
@ -302,47 +440,142 @@ func (c *Container) Delete() error {
|
||||||
// false if the container did not exist at the time of the Delete Container
|
// false if the container did not exist at the time of the Delete Container
|
||||||
// operation.
|
// operation.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
|
||||||
func (c *Container) DeleteIfExists() (bool, error) {
|
func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
|
||||||
resp, err := c.delete()
|
resp, err := c.delete(options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
|
||||||
return resp.statusCode == http.StatusAccepted, nil
|
return resp.StatusCode == http.StatusAccepted, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Container) delete() (*storageResponse, error) {
|
func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) {
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
query := url.Values{"restype": {"container"}}
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
|
||||||
|
|
||||||
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
|
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
|
||||||
}
|
}
|
||||||
|
|
||||||
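A conditional delete with the new options struct, as a sketch (not part of the diff; the cut-off time is illustrative):

package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// deleteIfStale removes the container only if it has not been modified in
// the last 24 hours, via the If-Unmodified-Since header.
func deleteIfStale(cnt *storage.Container) (bool, error) {
	cutoff := time.Now().Add(-24 * time.Hour)
	opts := &storage.DeleteContainerOptions{
		Timeout:           30,
		IfUnmodifiedSince: &cutoff,
	}
	return cnt.DeleteIfExists(opts)
}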
// ListBlobs returns an object that contains list of blobs in the container,
|
// ListBlobs returns an object that contains list of blobs in the container,
|
||||||
// pagination token and other information in the response of List Blobs call.
|
// pagination token and other information in the response of List Blobs call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
|
||||||
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
|
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
|
||||||
q := mergeParams(params.getParameters(), url.Values{
|
q := mergeParams(params.getParameters(), url.Values{
|
||||||
"restype": {"container"},
|
"restype": {"container"},
|
||||||
"comp": {"list"}},
|
"comp": {"list"},
|
||||||
)
|
})
|
||||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
|
var uri string
|
||||||
|
if c.bsc.client.isServiceSASClient() {
|
||||||
|
q = mergeParams(q, c.sasuri.Query())
|
||||||
|
newURI := c.sasuri
|
||||||
|
newURI.RawQuery = q.Encode()
|
||||||
|
uri = newURI.String()
|
||||||
|
} else {
|
||||||
|
uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
|
||||||
|
}
|
||||||
|
|
||||||
headers := c.bsc.client.getStandardHeaders()
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID)
|
||||||
|
|
||||||
var out BlobListResponse
|
var out BlobListResponse
|
||||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return out, err
|
return out, err
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
err = xmlUnmarshal(resp.Body, &out)
|
||||||
|
for i := range out.Blobs {
|
||||||
|
out.Blobs[i].Container = c
|
||||||
|
}
|
||||||
return out, err
|
return out, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ContainerMetadataOptions includes options for container metadata operations
|
||||||
|
type ContainerMetadataOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMetadata replaces the metadata for the specified container.
|
||||||
|
//
|
||||||
|
// Some keys may be converted to Camel-Case before sending. All keys
|
||||||
|
// are returned in lower case by GetBlobMetadata. HTTP header names
|
||||||
|
// are case-insensitive so case munging should not matter to other
|
||||||
|
// applications either.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
|
||||||
|
func (c *Container) SetMetadata(options *ContainerMetadataOptions) error {
|
||||||
|
params := url.Values{
|
||||||
|
"comp": {"metadata"},
|
||||||
|
"restype": {"container"},
|
||||||
|
}
|
||||||
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusOK})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMetadata returns all user-defined metadata for the specified container.
|
||||||
|
//
|
||||||
|
// All metadata keys will be returned in lower case. (HTTP header
|
||||||
|
// names are case-insensitive.)
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
|
||||||
|
func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
|
||||||
|
params := url.Values{
|
||||||
|
"comp": {"metadata"},
|
||||||
|
"restype": {"container"},
|
||||||
|
}
|
||||||
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.writeMetadata(resp.Header)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Container) writeMetadata(h http.Header) {
|
||||||
|
c.Metadata = writeMetadata(h)
|
||||||
|
}
|
||||||
|
|
||||||
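The metadata round trip, sketched for reference (not part of the diff; keys and values are illustrative):

package main

import "github.com/Azure/azure-sdk-for-go/storage"

// tagContainer sends c.Metadata with SetMetadata, then refreshes it with
// GetMetadata, which returns the keys lower-cased.
func tagContainer(cnt *storage.Container) error {
	cnt.Metadata = map[string]string{"purpose": "registry"}
	if err := cnt.SetMetadata(&storage.ContainerMetadataOptions{Timeout: 30}); err != nil {
		return err
	}
	return cnt.GetMetadata(nil)
}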
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
|
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
|
||||||
sil := SignedIdentifiers{
|
sil := SignedIdentifiers{
|
||||||
SignedIdentifiers: []SignedIdentifier{},
|
SignedIdentifiers: []SignedIdentifier{},
|
||||||
|
@ -374,3 +607,34 @@ func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions s
|
||||||
|
|
||||||
return permissions
|
return permissions
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetProperties updates the properties of the container.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
|
||||||
|
func (c *Container) GetProperties() error {
|
||||||
|
params := url.Values{
|
||||||
|
"restype": {"container"},
|
||||||
|
}
|
||||||
|
headers := c.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// update properties
|
||||||
|
c.Properties.Etag = resp.Header.Get(headerEtag)
|
||||||
|
c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status")
|
||||||
|
c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state")
|
||||||
|
c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration")
|
||||||
|
c.Properties.LastModified = resp.Header.Get("Last-Modified")
|
||||||
|
c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
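A small sketch of the new GetProperties helper (not part of the diff):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// printContainerProps refreshes cnt.Properties (including the new
// PublicAccess field) from the response headers and prints a few of them.
func printContainerProps(cnt *storage.Container) error {
	if err := cnt.GetProperties(); err != nil {
		return err
	}
	fmt.Println("etag:", cnt.Properties.Etag)
	fmt.Println("lease state:", cnt.Properties.LeaseState)
	fmt.Println("public access:", cnt.Properties.PublicAccess)
	return nil
}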
237  vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go  generated  vendored  Normal file
|
@ -0,0 +1,237 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
blobCopyStatusPending = "pending"
|
||||||
|
blobCopyStatusSuccess = "success"
|
||||||
|
blobCopyStatusAborted = "aborted"
|
||||||
|
blobCopyStatusFailed = "failed"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CopyOptions includes the options for a copy blob operation
|
||||||
|
type CopyOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
Source CopyOptionsConditions
|
||||||
|
Destiny CopyOptionsConditions
|
||||||
|
RequestID string
|
||||||
|
}
|
||||||
|
|
||||||
|
// IncrementalCopyOptions includes the options for an incremental copy blob operation
|
||||||
|
type IncrementalCopyOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
Destination IncrementalCopyOptionsConditions
|
||||||
|
RequestID string
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyOptionsConditions includes some conditional options in a copy blob operation
|
||||||
|
type CopyOptionsConditions struct {
|
||||||
|
LeaseID string
|
||||||
|
IfModifiedSince *time.Time
|
||||||
|
IfUnmodifiedSince *time.Time
|
||||||
|
IfMatch string
|
||||||
|
IfNoneMatch string
|
||||||
|
}
|
||||||
|
|
||||||
|
// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation
|
||||||
|
type IncrementalCopyOptionsConditions struct {
|
||||||
|
IfModifiedSince *time.Time
|
||||||
|
IfUnmodifiedSince *time.Time
|
||||||
|
IfMatch string
|
||||||
|
IfNoneMatch string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy starts a blob copy operation and waits for the operation to
|
||||||
|
// complete. sourceBlob parameter must be a canonical URL to the blob (can be
|
||||||
|
// obtained using the GetURL method.) There is no SLA on blob copy and therefore
|
||||||
|
// this helper method works faster on smaller files.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
|
||||||
|
func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
|
||||||
|
copyID, err := b.StartCopy(sourceBlob, options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.WaitForCopy(copyID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartCopy starts a blob copy operation.
|
||||||
|
// sourceBlob parameter must be a canonical URL to the blob (can be
|
||||||
|
// obtained using the GetURL method.)
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
|
||||||
|
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-copy-source"] = sourceBlob
|
||||||
|
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
||||||
|
// source
|
||||||
|
headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID)
|
||||||
|
headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
|
||||||
|
headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
|
||||||
|
headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch)
|
||||||
|
headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
|
||||||
|
//destiny
|
||||||
|
headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID)
|
||||||
|
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
|
||||||
|
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
|
||||||
|
headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch)
|
||||||
|
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
copyID := resp.Header.Get("x-ms-copy-id")
|
||||||
|
if copyID == "" {
|
||||||
|
return "", errors.New("Got empty copy id header")
|
||||||
|
}
|
||||||
|
return copyID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AbortCopyOptions includes the options for an abort blob operation
|
||||||
|
type AbortCopyOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function.
|
||||||
|
// copyID is generated from StartBlobCopy function.
|
||||||
|
// currentLeaseID is required IF the destination blob has an active lease on it.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
|
||||||
|
func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
|
||||||
|
params := url.Values{
|
||||||
|
"comp": {"copy"},
|
||||||
|
"copyid": {copyID},
|
||||||
|
}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-copy-action"] = "abort"
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForCopy loops until a BlobCopy operation is completed (or fails with error)
|
||||||
|
func (b *Blob) WaitForCopy(copyID string) error {
|
||||||
|
for {
|
||||||
|
err := b.GetProperties(nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Properties.CopyID != copyID {
|
||||||
|
return errBlobCopyIDMismatch
|
||||||
|
}
|
||||||
|
|
||||||
|
switch b.Properties.CopyStatus {
|
||||||
|
case blobCopyStatusSuccess:
|
||||||
|
return nil
|
||||||
|
case blobCopyStatusPending:
|
||||||
|
continue
|
||||||
|
case blobCopyStatusAborted:
|
||||||
|
return errBlobCopyAborted
|
||||||
|
case blobCopyStatusFailed:
|
||||||
|
return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob
|
||||||
|
// sourceBlob parameter must be a valid snapshot URL of the original blob.
|
||||||
|
// The original blob must be public, or use a Shared Access Signature.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
|
||||||
|
func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
|
||||||
|
params := url.Values{"comp": {"incrementalcopy"}}
|
||||||
|
|
||||||
|
// need formatting to 7 decimal places so it's friendly to Windows and *nix
|
||||||
|
snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
|
||||||
|
u, err := url.Parse(sourceBlobURL)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
query := u.Query()
|
||||||
|
query.Add("snapshot", snapshotTimeFormatted)
|
||||||
|
encodedQuery := query.Encode()
|
||||||
|
encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
|
||||||
|
u.RawQuery = encodedQuery
|
||||||
|
snapshotURL := u.String()
|
||||||
|
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-copy-source"] = snapshotURL
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
addTimeout(params, options.Timeout)
|
||||||
|
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
||||||
|
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
|
||||||
|
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
|
||||||
|
headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
|
||||||
|
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// get URI of destination blob
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
copyID := resp.Header.Get("x-ms-copy-id")
|
||||||
|
if copyID == "" {
|
||||||
|
return "", errors.New("Got empty copy id header")
|
||||||
|
}
|
||||||
|
return copyID, nil
|
||||||
|
}
|
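The new copy helpers, sketched for reference (not part of the diff; the lease ID is illustrative, and GetURL is the blob method the comments above point to):

package main

import "github.com/Azure/azure-sdk-for-go/storage"

// copyBlob starts a copy from src to dst and waits for it to finish;
// Copy composes StartCopy and WaitForCopy, and the conditional fields
// map onto the x-ms-(source-)if-* headers set above.
func copyBlob(src, dst *storage.Blob, destLeaseID string) error {
	opts := &storage.CopyOptions{Timeout: 60}
	// "Destiny" is the field name this SDK uses for destination conditions.
	opts.Destiny.LeaseID = destLeaseID
	return dst.Copy(src.GetURL(), opts)
}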
81  vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go  generated  vendored
|
@ -1,9 +1,24 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Directory represents a directory on a share.
|
// Directory represents a directory on a share.
|
||||||
|
@ -25,8 +40,9 @@ type DirectoryProperties struct {
|
||||||
// ListDirsAndFilesParameters defines the set of customizable parameters to
|
// ListDirsAndFilesParameters defines the set of customizable parameters to
|
||||||
// make a List Files and Directories call.
|
// make a List Files and Directories call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||||
type ListDirsAndFilesParameters struct {
|
type ListDirsAndFilesParameters struct {
|
||||||
|
Prefix string
|
||||||
Marker string
|
Marker string
|
||||||
MaxResults uint
|
MaxResults uint
|
||||||
Timeout uint
|
Timeout uint
|
||||||
|
@ -35,7 +51,7 @@ type ListDirsAndFilesParameters struct {
|
||||||
// DirsAndFilesListResponse contains the response fields from
|
// DirsAndFilesListResponse contains the response fields from
|
||||||
// a List Files and Directories call.
|
// a List Files and Directories call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||||
type DirsAndFilesListResponse struct {
|
type DirsAndFilesListResponse struct {
|
||||||
XMLName xml.Name `xml:"EnumerationResults"`
|
XMLName xml.Name `xml:"EnumerationResults"`
|
||||||
Xmlns string `xml:"xmlns,attr"`
|
Xmlns string `xml:"xmlns,attr"`
|
||||||
|
@ -60,14 +76,15 @@ func (d *Directory) buildPath() string {
|
||||||
// Create this directory in the associated share.
|
// Create this directory in the associated share.
|
||||||
// If a directory with the same name already exists, the operation fails.
|
// If a directory with the same name already exists, the operation fails.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
|
||||||
func (d *Directory) Create() error {
|
func (d *Directory) Create(options *FileRequestOptions) error {
|
||||||
// if this is the root directory exit early
|
// if this is the root directory exit early
|
||||||
if d.parent == nil {
|
if d.parent == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
|
params := prepareOptions(options)
|
||||||
|
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -80,23 +97,24 @@ func (d *Directory) Create() error {
|
||||||
// directory does not exist. Returns true if the directory is newly created or
|
// directory does not exist. Returns true if the directory is newly created or
|
||||||
// false if the directory already exists.
|
// false if the directory already exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
|
||||||
func (d *Directory) CreateIfNotExists() (bool, error) {
|
func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
|
||||||
// if this is the root directory exit early
|
// if this is the root directory exit early
|
||||||
if d.parent == nil {
|
if d.parent == nil {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil)
|
params := prepareOptions(options)
|
||||||
|
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
|
||||||
if resp.statusCode == http.StatusCreated {
|
if resp.StatusCode == http.StatusCreated {
|
||||||
d.updateEtagAndLastModified(resp.headers)
|
d.updateEtagAndLastModified(resp.Header)
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return false, d.FetchAttributes()
|
return false, d.FetchAttributes(nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -106,20 +124,20 @@ func (d *Directory) CreateIfNotExists() (bool, error) {
|
||||||
// Delete removes this directory. It must be empty in order to be deleted.
|
// Delete removes this directory. It must be empty in order to be deleted.
|
||||||
// If the directory does not exist the operation fails.
|
// If the directory does not exist the operation fails.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
|
||||||
func (d *Directory) Delete() error {
|
func (d *Directory) Delete(options *FileRequestOptions) error {
|
||||||
return d.fsc.deleteResource(d.buildPath(), resourceDirectory)
|
return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteIfExists removes this directory if it exists.
|
// DeleteIfExists removes this directory if it exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
|
||||||
func (d *Directory) DeleteIfExists() (bool, error) {
|
func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
|
||||||
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory)
|
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
|
||||||
return resp.statusCode == http.StatusAccepted, nil
|
return resp.StatusCode == http.StatusAccepted, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false, err
|
return false, err
|
||||||
|
@ -135,8 +153,10 @@ func (d *Directory) Exists() (bool, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// FetchAttributes retrieves metadata for this directory.
|
// FetchAttributes retrieves metadata for this directory.
|
||||||
func (d *Directory) FetchAttributes() error {
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
|
||||||
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead)
|
func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
|
||||||
|
params := prepareOptions(options)
|
||||||
|
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -164,13 +184,14 @@ func (d *Directory) GetFileReference(name string) *File {
|
||||||
Name: name,
|
Name: name,
|
||||||
parent: d,
|
parent: d,
|
||||||
share: d.share,
|
share: d.share,
|
||||||
|
mutex: &sync.Mutex{},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListDirsAndFiles returns a list of files and directories under this directory.
|
// ListDirsAndFiles returns a list of files and directories under this directory.
|
||||||
// It also contains a pagination token and other response details.
|
// It also contains a pagination token and other response details.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||||
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
|
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
|
||||||
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
|
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
|
||||||
|
|
||||||
|
@ -179,9 +200,9 @@ func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAn
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer resp.body.Close()
|
defer resp.Body.Close()
|
||||||
var out DirsAndFilesListResponse
|
var out DirsAndFilesListResponse
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
err = xmlUnmarshal(resp.Body, &out)
|
||||||
return &out, err
|
return &out, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -192,9 +213,9 @@ func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAn
|
||||||
// are case-insensitive so case munging should not matter to other
|
// are case-insensitive so case munging should not matter to other
|
||||||
// applications either.
|
// applications either.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
|
||||||
func (d *Directory) SetMetadata() error {
|
func (d *Directory) SetMetadata(options *FileRequestOptions) error {
|
||||||
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil))
|
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
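The reworked directory API in use, as a sketch (not part of the diff; nil options are accepted, as the diff itself passes for FetchAttributes, and the metadata is illustrative):

package main

import "github.com/Azure/azure-sdk-for-go/storage"

// ensureDirectory creates the directory if it is missing and tags it with
// metadata on first creation; every call takes a *FileRequestOptions.
func ensureDirectory(dir *storage.Directory) error {
	created, err := dir.CreateIfNotExists(nil)
	if err != nil {
		return err
	}
	if created {
		dir.Metadata = map[string]string{"owner": "registry"}
		return dir.SetMetadata(nil)
	}
	return nil
}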
456  vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go  generated  vendored  Normal file
|
@@ -0,0 +1,456 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/satori/go.uuid"
+)
+
+// Annotating as secure for gas scanning
+/* #nosec */
+const (
+	partitionKeyNode  = "PartitionKey"
+	rowKeyNode        = "RowKey"
+	etagErrorTemplate = "Etag didn't match: %v"
+)
+
+var (
+	errEmptyPayload      = errors.New("Empty payload is not a valid metadata level for this operation")
+	errNilPreviousResult = errors.New("The previous results page is nil")
+	errNilNextLink       = errors.New("There are no more pages in this query results")
+)
+
+// Entity represents an entity inside an Azure table.
+type Entity struct {
+	Table         *Table
+	PartitionKey  string
+	RowKey        string
+	TimeStamp     time.Time
+	OdataMetadata string
+	OdataType     string
+	OdataID       string
+	OdataEtag     string
+	OdataEditLink string
+	Properties    map[string]interface{}
+}
+
+// GetEntityReference returns an Entity object with the specified
+// partition key and row key.
+func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity {
+	return &Entity{
+		PartitionKey: partitionKey,
+		RowKey:       rowKey,
+		Table:        t,
+	}
+}
+
+// EntityOptions includes options for entity operations.
+type EntityOptions struct {
+	Timeout   uint
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// GetEntityOptions includes options for a get entity operation
+type GetEntityOptions struct {
+	Select    []string
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// Get gets the referenced entity. Which properties to get can be
+// specified using the select option.
+// See:
+// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
+// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
+func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error {
+	if ml == EmptyPayload {
+		return errEmptyPayload
+	}
+	// RowKey and PartitionKey could be lost if not included in the query
+	// As those are the entity identifiers, it is best if they are not lost
+	rk := e.RowKey
+	pk := e.PartitionKey
+
+	query := url.Values{
+		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
+	}
+	headers := e.Table.tsc.client.getStandardHeaders()
+	headers[headerAccept] = string(ml)
+
+	if options != nil {
+		if len(options.Select) > 0 {
+			query.Add("$select", strings.Join(options.Select, ","))
+		}
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+
+	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
+	resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+
+	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
+		return err
+	}
+
+	respBody, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+	err = json.Unmarshal(respBody, e)
+	if err != nil {
+		return err
+	}
+	e.PartitionKey = pk
+	e.RowKey = rk
+
+	return nil
+}
+
+// Insert inserts the referenced entity in its table.
+// The function fails if there is an entity with the same
+// PartitionKey and RowKey in the table.
+// ml determines the level of detail of metadata in the operation response,
+// or no data at all.
+// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity
+func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error {
+	query, headers := options.getParameters()
+	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
+
+	body, err := json.Marshal(e)
+	if err != nil {
+		return err
+	}
+	headers = addBodyRelatedHeaders(headers, len(body))
+	headers = addReturnContentHeaders(headers, ml)
+
+	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query)
+	resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+
+	if ml != EmptyPayload {
+		if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil {
+			return err
+		}
+		data, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return err
+		}
+		if err = e.UnmarshalJSON(data); err != nil {
+			return err
+		}
+	} else {
+		if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Update updates the contents of an entity. The function fails if there is no entity
+// with the same PartitionKey and RowKey in the table or if the ETag is different
+// than the one in Azure.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2
+func (e *Entity) Update(force bool, options *EntityOptions) error {
+	return e.updateMerge(force, http.MethodPut, options)
+}
+
+// Merge merges the contents of entity specified with PartitionKey and RowKey
+// with the content specified in Properties.
+// The function fails if there is no entity with the same PartitionKey and
+// RowKey in the table or if the ETag is different than the one in Azure.
+// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
+func (e *Entity) Merge(force bool, options *EntityOptions) error {
+	return e.updateMerge(force, "MERGE", options)
+}
+
+// Delete deletes the entity.
+// The function fails if there is no entity with the same PartitionKey and
+// RowKey in the table or if the ETag is different than the one in Azure.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
+func (e *Entity) Delete(force bool, options *EntityOptions) error {
+	query, headers := options.getParameters()
+	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
+
+	headers = addIfMatchHeader(headers, force, e.OdataEtag)
+	headers = addReturnContentHeaders(headers, EmptyPayload)
+
+	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
+	resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth)
+	if err != nil {
+		if resp.StatusCode == http.StatusPreconditionFailed {
+			return fmt.Errorf(etagErrorTemplate, err)
+		}
+		return err
+	}
+	defer drainRespBody(resp)
+
+	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
+		return err
+	}
+
+	return e.updateTimestamp(resp.Header)
+}
+
+// InsertOrReplace inserts an entity or replaces the existing one.
+// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
+func (e *Entity) InsertOrReplace(options *EntityOptions) error {
+	return e.insertOr(http.MethodPut, options)
+}
+
+// InsertOrMerge inserts an entity or merges the existing one.
+// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
+func (e *Entity) InsertOrMerge(options *EntityOptions) error {
+	return e.insertOr("MERGE", options)
+}
+
+func (e *Entity) buildPath() string {
+	return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey)
+}
+
+// MarshalJSON is a custom marshaller for entity
+func (e *Entity) MarshalJSON() ([]byte, error) {
+	completeMap := map[string]interface{}{}
+	completeMap[partitionKeyNode] = e.PartitionKey
+	completeMap[rowKeyNode] = e.RowKey
+	for k, v := range e.Properties {
+		typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
+		switch t := v.(type) {
+		case []byte:
+			completeMap[typeKey] = OdataBinary
+			completeMap[k] = t
+		case time.Time:
+			completeMap[typeKey] = OdataDateTime
+			completeMap[k] = t.Format(time.RFC3339Nano)
+		case uuid.UUID:
+			completeMap[typeKey] = OdataGUID
+			completeMap[k] = t.String()
+		case int64:
+			completeMap[typeKey] = OdataInt64
+			completeMap[k] = fmt.Sprintf("%v", v)
+		default:
+			completeMap[k] = v
+		}
+		if strings.HasSuffix(k, OdataTypeSuffix) {
+			if !(completeMap[k] == OdataBinary ||
+				completeMap[k] == OdataDateTime ||
+				completeMap[k] == OdataGUID ||
+				completeMap[k] == OdataInt64) {
+				return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
+			}
+			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
+			if _, ok := completeMap[valueKey]; !ok {
+				return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
+			}
+		}
+	}
+	return json.Marshal(completeMap)
+}
+
+// UnmarshalJSON is a custom unmarshaller for entities
+func (e *Entity) UnmarshalJSON(data []byte) error {
+	errorTemplate := "Deserializing error: %v"
+
+	props := map[string]interface{}{}
+	err := json.Unmarshal(data, &props)
+	if err != nil {
+		return err
+	}
+
+	// deselialize metadata
+	e.OdataMetadata = stringFromMap(props, "odata.metadata")
+	e.OdataType = stringFromMap(props, "odata.type")
+	e.OdataID = stringFromMap(props, "odata.id")
+	e.OdataEtag = stringFromMap(props, "odata.etag")
+	e.OdataEditLink = stringFromMap(props, "odata.editLink")
+	e.PartitionKey = stringFromMap(props, partitionKeyNode)
+	e.RowKey = stringFromMap(props, rowKeyNode)
+
+	// deserialize timestamp
+	timeStamp, ok := props["Timestamp"]
+	if ok {
+		str, ok := timeStamp.(string)
+		if !ok {
+			return fmt.Errorf(errorTemplate, "Timestamp casting error")
+		}
+		t, err := time.Parse(time.RFC3339Nano, str)
+		if err != nil {
+			return fmt.Errorf(errorTemplate, err)
+		}
+		e.TimeStamp = t
+	}
+	delete(props, "Timestamp")
+	delete(props, "Timestamp@odata.type")
+
+	// deserialize entity (user defined fields)
+	for k, v := range props {
+		if strings.HasSuffix(k, OdataTypeSuffix) {
+			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
+			str, ok := props[valueKey].(string)
+			if !ok {
+				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v))
+			}
+			switch v {
+			case OdataBinary:
+				props[valueKey], err = base64.StdEncoding.DecodeString(str)
+				if err != nil {
+					return fmt.Errorf(errorTemplate, err)
+				}
+			case OdataDateTime:
+				t, err := time.Parse("2006-01-02T15:04:05Z", str)
+				if err != nil {
+					return fmt.Errorf(errorTemplate, err)
+				}
+				props[valueKey] = t
+			case OdataGUID:
+				props[valueKey] = uuid.FromStringOrNil(str)
+			case OdataInt64:
+				i, err := strconv.ParseInt(str, 10, 64)
+				if err != nil {
+					return fmt.Errorf(errorTemplate, err)
+				}
+				props[valueKey] = i
+			default:
+				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v))
+			}
+			delete(props, k)
+		}
+	}
+
+	e.Properties = props
+	return nil
+}
+
+func getAndDelete(props map[string]interface{}, key string) interface{} {
+	if value, ok := props[key]; ok {
+		delete(props, key)
+		return value
+	}
+	return nil
+}
+
+func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string {
+	if force {
+		h[headerIfMatch] = "*"
+	} else {
+		h[headerIfMatch] = etag
+	}
+	return h
+}
+
+// updates Etag and timestamp
+func (e *Entity) updateEtagAndTimestamp(headers http.Header) error {
+	e.OdataEtag = headers.Get(headerEtag)
+	return e.updateTimestamp(headers)
+}
+
+func (e *Entity) updateTimestamp(headers http.Header) error {
+	str := headers.Get(headerDate)
+	t, err := time.Parse(time.RFC1123, str)
+	if err != nil {
+		return fmt.Errorf("Update timestamp error: %v", err)
+	}
+	e.TimeStamp = t
+	return nil
+}
+
+func (e *Entity) insertOr(verb string, options *EntityOptions) error {
+	query, headers := options.getParameters()
+	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
+
+	body, err := json.Marshal(e)
+	if err != nil {
+		return err
+	}
+	headers = addBodyRelatedHeaders(headers, len(body))
+	headers = addReturnContentHeaders(headers, EmptyPayload)
+
+	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
+	resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+
+	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
+		return err
+	}
+
+	return e.updateEtagAndTimestamp(resp.Header)
+}
+
+func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error {
+	query, headers := options.getParameters()
+	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
+
+	body, err := json.Marshal(e)
+	if err != nil {
+		return err
+	}
+	headers = addBodyRelatedHeaders(headers, len(body))
+	headers = addIfMatchHeader(headers, force, e.OdataEtag)
+	headers = addReturnContentHeaders(headers, EmptyPayload)
+
+	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
+	resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
+	if err != nil {
+		if resp.StatusCode == http.StatusPreconditionFailed {
+			return fmt.Errorf(etagErrorTemplate, err)
+		}
+		return err
+	}
+	defer drainRespBody(resp)
+
+	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
+		return err
+	}
+
+	return e.updateEtagAndTimestamp(resp.Header)
+}
+
+func stringFromMap(props map[string]interface{}, key string) string {
+	value := getAndDelete(props, key)
+	if value != nil {
+		return value.(string)
+	}
+	return ""
+}
+
+func (options *EntityOptions) getParameters() (url.Values, map[string]string) {
+	query := url.Values{}
+	headers := map[string]string{}
+	if options != nil {
+		query = addTimeout(query, options.Timeout)
+		headers = headersFromStruct(*options)
+	}
+	return query, headers
+}
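Because entity.go is new in this vendoring, a short usage sketch may help orient readers. It is illustrative only and not part of the diff: the account, key, table, and row names are invented, and it assumes GetTableReference returns *Table as in the accompanying table changes of this SDK version.

// Hypothetical usage sketch for the new table Entity API.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder account/key
	if err != nil {
		panic(err)
	}
	tsc := client.GetTableService()
	table := tsc.GetTableReference("registrymeta") // assumed table name

	// Entities are addressed by PartitionKey + RowKey; Properties carries the
	// user-defined columns and is handled by the custom JSON (un)marshallers above.
	entity := table.GetEntityReference("repos", "hello-world")
	entity.Properties = map[string]interface{}{"pulls": int64(1)}

	if err := entity.Insert(storage.EmptyPayload, nil); err != nil {
		fmt.Println("insert failed:", err)
	}
	// Get rejects EmptyPayload, so request full (or minimal) metadata.
	if err := entity.Get(30, storage.FullMetadata, nil); err != nil {
		fmt.Println("get failed:", err)
	}
}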
vendor/github.com/Azure/azure-sdk-for-go/storage/file.go (generated, vendored, 252 lines changed)
@@ -1,5 +1,19 @@
 package storage
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 	"errors"
 	"fmt"
@@ -8,11 +22,16 @@ import (
 	"net/http"
 	"net/url"
 	"strconv"
+	"sync"
 )
 
 const fourMB = uint64(4194304)
 const oneTB = uint64(1099511627776)
 
+// Export maximum range and file sizes
+const MaxRangeSize = fourMB
+const MaxFileSize = oneTB
+
 // File represents a file on a share.
 type File struct {
 	fsc *FileServiceClient
@@ -22,6 +41,7 @@ type File struct {
 	Properties         FileProperties `xml:"Properties"`
 	share              *Share
 	FileCopyProperties FileCopyState
+	mutex              *sync.Mutex
 }
 
 // FileProperties contains various properties of a file.
@@ -32,7 +52,7 @@ type FileProperties struct {
 	Etag         string
 	Language     string `header:"x-ms-content-language"`
 	LastModified string
-	Length       uint64 `xml:"Content-Length"`
+	Length       uint64 `xml:"Content-Length" header:"x-ms-content-length"`
 	MD5          string `header:"x-ms-content-md5"`
 	Type         string `header:"x-ms-content-type"`
 }
@@ -54,26 +74,22 @@ type FileStream struct {
 }
 
 // FileRequestOptions will be passed to misc file operations.
-// Currently just Timeout (in seconds) but will expand.
+// Currently just Timeout (in seconds) but could expand.
 type FileRequestOptions struct {
 	Timeout uint // timeout duration in seconds.
 }
 
-// getParameters, construct parameters for FileRequestOptions.
-// currently only timeout, but expecting to grow as functionality fills out.
-func (p FileRequestOptions) getParameters() url.Values {
-	out := url.Values{}
-
-	if p.Timeout != 0 {
-		out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
-	}
-
-	return out
+func prepareOptions(options *FileRequestOptions) url.Values {
+	params := url.Values{}
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+	}
+	return params
 }
 
 // FileRanges contains a list of file range information for a file.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
 type FileRanges struct {
 	ContentLength uint64
 	LastModified  string
@@ -83,7 +99,7 @@ type FileRanges struct {
 
 // FileRange contains range information for a file.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
 type FileRange struct {
 	Start uint64 `xml:"Start"`
 	End   uint64 `xml:"End"`
@@ -100,9 +116,13 @@ func (f *File) buildPath() string {
 
 // ClearRange releases the specified range of space in a file.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
-func (f *File) ClearRange(fileRange FileRange) error {
-	headers, err := f.modifyRange(nil, fileRange, nil)
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
+func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
+	var timeout *uint
+	if options != nil {
+		timeout = &options.Timeout
+	}
+	headers, err := f.modifyRange(nil, fileRange, timeout, nil)
 	if err != nil {
 		return err
 	}
@@ -113,24 +133,23 @@ func (f *File) ClearRange(fileRange FileRange) error {
 
 // Create creates a new file or replaces an existing one.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx
-func (f *File) Create(maxSize uint64) error {
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
+func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
 	if maxSize > oneTB {
 		return fmt.Errorf("max file size is 1TB")
 	}
+	params := prepareOptions(options)
+	headers := headersFromStruct(f.Properties)
+	headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
+	headers["x-ms-type"] = "file"
 
-	extraHeaders := map[string]string{
-		"x-ms-content-length": strconv.FormatUint(maxSize, 10),
-		"x-ms-type":           "file",
-	}
-
-	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated})
+	outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
 	if err != nil {
 		return err
 	}
 
 	f.Properties.Length = maxSize
-	f.updateEtagAndLastModified(headers)
+	f.updateEtagAndLastModified(outputHeaders)
 	return nil
 }
 
@@ -142,70 +161,94 @@ func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
 		"x-ms-type":        "file",
 		"x-ms-copy-source": sourceURL,
 	}
+	params := prepareOptions(options)
 
-	var parameters url.Values
-	if options != nil {
-		parameters = options.getParameters()
-	}
-
-	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
+	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
 	if err != nil {
 		return err
 	}
 
-	f.updateEtagLastModifiedAndCopyHeaders(headers)
+	f.updateEtagAndLastModified(headers)
+	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
+	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
 	return nil
 }
 
 // Delete immediately removes this file from the storage account.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
-func (f *File) Delete() error {
-	return f.fsc.deleteResource(f.buildPath(), resourceFile)
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
+func (f *File) Delete(options *FileRequestOptions) error {
+	return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
 }
 
 // DeleteIfExists removes this file if it exists.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
-func (f *File) DeleteIfExists() (bool, error) {
-	resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile)
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
+func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
+	resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
 	if resp != nil {
-		defer readAndCloseBody(resp.body)
-		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
-			return resp.statusCode == http.StatusAccepted, nil
+		defer drainRespBody(resp)
+		if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
+			return resp.StatusCode == http.StatusAccepted, nil
 		}
 	}
 	return false, err
 }
 
+// GetFileOptions includes options for a get file operation
+type GetFileOptions struct {
+	Timeout       uint
+	GetContentMD5 bool
+}
+
+// DownloadToStream operation downloads the file.
+//
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
+func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) {
+	params := prepareOptions(options)
+	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
+		drainRespBody(resp)
+		return nil, err
+	}
+	return resp.Body, nil
+}
+
 // DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
 //
 // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
-func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) {
-	if getContentMD5 && isRangeTooBig(fileRange) {
-		return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
-	}
-
+func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) {
 	extraHeaders := map[string]string{
 		"Range": fileRange.String(),
 	}
-	if getContentMD5 == true {
-		extraHeaders["x-ms-range-get-content-md5"] = "true"
+	params := url.Values{}
+	if options != nil {
+		if options.GetContentMD5 {
+			if isRangeTooBig(fileRange) {
+				return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
+			}
+			extraHeaders["x-ms-range-get-content-md5"] = "true"
+		}
+		params = addTimeout(params, options.Timeout)
 	}
 
-	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders)
+	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders)
 	if err != nil {
 		return fs, err
 	}
 
-	if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
-		resp.body.Close()
+	if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
+		drainRespBody(resp)
 		return fs, err
 	}
 
-	fs.Body = resp.body
-	if getContentMD5 {
-		fs.ContentMD5 = resp.headers.Get("Content-MD5")
+	fs.Body = resp.Body
+	if options != nil && options.GetContentMD5 {
+		fs.ContentMD5 = resp.Header.Get("Content-MD5")
 	}
 	return fs, nil
 }
@@ -221,8 +264,10 @@ func (f *File) Exists() (bool, error) {
 }
 
 // FetchAttributes updates metadata and properties for this file.
-func (f *File) FetchAttributes() error {
-	headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead)
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties
+func (f *File) FetchAttributes(options *FileRequestOptions) error {
+	params := prepareOptions(options)
+	headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead)
 	if err != nil {
 		return err
 	}
@@ -242,17 +287,26 @@ func isRangeTooBig(fileRange FileRange) bool {
 	return false
 }
 
+// ListRangesOptions includes options for a list file ranges operation
+type ListRangesOptions struct {
+	Timeout   uint
+	ListRange *FileRange
+}
+
 // ListRanges returns the list of valid ranges for this file.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
-func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
+func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) {
 	params := url.Values{"comp": {"rangelist"}}
 
 	// add optional range to list
 	var headers map[string]string
-	if listRange != nil {
-		headers = make(map[string]string)
-		headers["Range"] = listRange.String()
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		if options.ListRange != nil {
+			headers = make(map[string]string)
+			headers["Range"] = options.ListRange.String()
+		}
 	}
 
 	resp, err := f.fsc.listContent(f.buildPath(), params, headers)
@@ -260,25 +314,25 @@ func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
 		return nil, err
 	}
 
-	defer resp.body.Close()
+	defer resp.Body.Close()
 	var cl uint64
-	cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64)
+	cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64)
 	if err != nil {
-		ioutil.ReadAll(resp.body)
+		ioutil.ReadAll(resp.Body)
 		return nil, err
 	}
 
 	var out FileRanges
 	out.ContentLength = cl
-	out.ETag = resp.headers.Get("ETag")
-	out.LastModified = resp.headers.Get("Last-Modified")
+	out.ETag = resp.Header.Get("ETag")
+	out.LastModified = resp.Header.Get("Last-Modified")
 
-	err = xmlUnmarshal(resp.body, &out)
+	err = xmlUnmarshal(resp.Body, &out)
 	return &out, err
 }
 
 // modifies a range of bytes in this file
-func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) {
+func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) {
 	if err := f.fsc.checkForStorageEmulator(); err != nil {
 		return nil, err
 	}
@@ -289,7 +343,12 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str
 		return nil, errors.New("range cannot exceed 4MB in size")
 	}
 
-	uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}})
+	params := url.Values{"comp": {"range"}}
+	if timeout != nil {
+		params = addTimeout(params, *timeout)
+	}
+
+	uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params)
 
 	// default to clear
 	write := "clear"
@@ -316,8 +375,8 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str
 	if err != nil {
 		return nil, err
 	}
-	defer readAndCloseBody(resp.body)
-	return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
+	defer drainRespBody(resp)
+	return resp.Header, checkRespCode(resp, []int{http.StatusCreated})
 }
 
 // SetMetadata replaces the metadata for this file.
@@ -327,9 +386,9 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str
 // are case-insensitive so case munging should not matter to other
 // applications either.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx
-func (f *File) SetMetadata() error {
-	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil))
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata
+func (f *File) SetMetadata(options *FileRequestOptions) error {
+	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options)
 	if err != nil {
 		return err
 	}
@@ -345,9 +404,9 @@ func (f *File) SetMetadata() error {
 // are case-insensitive so case munging should not matter to other
 // applications either.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx
-func (f *File) SetProperties() error {
-	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties))
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties
+func (f *File) SetProperties(options *FileRequestOptions) error {
+	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options)
 	if err != nil {
 		return err
 	}
@@ -362,14 +421,6 @@ func (f *File) updateEtagAndLastModified(headers http.Header) {
 	f.Properties.LastModified = headers.Get("Last-Modified")
 }
 
-// updates Etag, last modified date and x-ms-copy-id
-func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) {
-	f.Properties.Etag = headers.Get("Etag")
-	f.Properties.LastModified = headers.Get("Last-Modified")
-	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
-	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
-}
-
 // updates file properties from the specified HTTP header
 func (f *File) updateProperties(header http.Header) {
 	size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
@@ -390,23 +441,40 @@ func (f *File) updateProperties(header http.Header) {
 // This method does not create a publicly accessible URL if the file
 // is private and this method does not check if the file exists.
 func (f *File) URL() string {
-	return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{})
+	return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
}
 
-// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content.
-// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB.
+// WriteRangeOptions includes options for a write file range operation
+type WriteRangeOptions struct {
+	Timeout    uint
+	ContentMD5 string
+}
+
+// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside
+// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with
+// a maximum size of 4MB.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
-func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error {
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
+func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error {
 	if bytes == nil {
 		return errors.New("bytes cannot be nil")
 	}
+	var timeout *uint
+	var md5 *string
+	if options != nil {
+		timeout = &options.Timeout
+		md5 = &options.ContentMD5
+	}
 
-	headers, err := f.modifyRange(bytes, fileRange, contentMD5)
+	headers, err := f.modifyRange(bytes, fileRange, timeout, md5)
 	if err != nil {
 		return err
 	}
+	// it's perfectly legal for multiple go routines to call WriteRange
+	// on the same *File (e.g. concurrently writing non-overlapping ranges)
+	// so we must take the file mutex before updating our properties.
+	f.mutex.Lock()
 	f.updateEtagAndLastModified(headers)
+	f.mutex.Unlock()
 	return nil
 }
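The file.go hunks all follow the same pattern: boolean flags, bare timeouts, and raw MD5 pointers move into small options structs, and the new mutex makes concurrent WriteRange calls on one *File safe while properties are updated. A hedged migration sketch follows; it is not part of the diff and the account, key, share, and file names are placeholders.

// Hypothetical migration sketch for the new file options structs.
package main

import (
	"bytes"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder account/key
	if err != nil {
		panic(err)
	}
	fsc := client.GetFileService()
	file := fsc.GetShareReference("myshare").GetRootDirectoryReference().GetFileReference("blob.dat")

	// Create(maxSize) became Create(maxSize, *FileRequestOptions).
	if err := file.Create(1024, &storage.FileRequestOptions{Timeout: 30}); err != nil {
		panic(err)
	}

	// WriteRange(r, fileRange, contentMD5 *string) became WriteRange(r, fileRange, *WriteRangeOptions).
	data := []byte("hello")
	r := storage.FileRange{Start: 0, End: uint64(len(data) - 1)}
	if err := file.WriteRange(bytes.NewReader(data), r, &storage.WriteRangeOptions{Timeout: 30}); err != nil {
		panic(err)
	}

	// DownloadRangeToStream(fileRange, getContentMD5 bool) now takes *GetFileOptions.
	fs, err := file.DownloadRangeToStream(r, &storage.GetFileOptions{GetContentMD5: true})
	if err != nil {
		panic(err)
	}
	defer fs.Body.Close()
	fmt.Println("range MD5:", fs.ContentMD5)
}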
vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go (generated, vendored, 149 lines changed)
@ -1,11 +1,25 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FileServiceClient contains operations for Microsoft Azure File Service.
|
// FileServiceClient contains operations for Microsoft Azure File Service.
|
||||||
|
@ -17,7 +31,7 @@ type FileServiceClient struct {
|
||||||
// ListSharesParameters defines the set of customizable parameters to make a
|
// ListSharesParameters defines the set of customizable parameters to make a
|
||||||
// List Shares call.
|
// List Shares call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
|
||||||
type ListSharesParameters struct {
|
type ListSharesParameters struct {
|
||||||
Prefix string
|
Prefix string
|
||||||
Marker string
|
Marker string
|
||||||
|
@ -29,7 +43,7 @@ type ListSharesParameters struct {
|
||||||
// ShareListResponse contains the response fields from
|
// ShareListResponse contains the response fields from
|
||||||
// ListShares call.
|
// ListShares call.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
|
||||||
type ShareListResponse struct {
|
type ShareListResponse struct {
|
||||||
XMLName xml.Name `xml:"EnumerationResults"`
|
XMLName xml.Name `xml:"EnumerationResults"`
|
||||||
Xmlns string `xml:"xmlns,attr"`
|
Xmlns string `xml:"xmlns,attr"`
|
||||||
|
@ -79,10 +93,10 @@ func (p ListSharesParameters) getParameters() url.Values {
|
||||||
out.Set("include", p.Include)
|
out.Set("include", p.Include)
|
||||||
}
|
}
|
||||||
if p.MaxResults != 0 {
|
if p.MaxResults != 0 {
|
||||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
||||||
}
|
}
|
||||||
if p.Timeout != 0 {
|
if p.Timeout != 0 {
|
||||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
||||||
}
|
}
|
||||||
|
|
||||||
return out
|
return out
|
||||||
|
@ -91,15 +105,16 @@ func (p ListSharesParameters) getParameters() url.Values {
|
||||||
func (p ListDirsAndFilesParameters) getParameters() url.Values {
|
func (p ListDirsAndFilesParameters) getParameters() url.Values {
|
||||||
out := url.Values{}
|
out := url.Values{}
|
||||||
|
|
||||||
|
if p.Prefix != "" {
|
||||||
|
out.Set("prefix", p.Prefix)
|
||||||
|
}
|
||||||
if p.Marker != "" {
|
if p.Marker != "" {
|
||||||
out.Set("marker", p.Marker)
|
out.Set("marker", p.Marker)
|
||||||
}
|
}
|
||||||
if p.MaxResults != 0 {
|
if p.MaxResults != 0 {
|
||||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
||||||
}
|
|
||||||
if p.Timeout != 0 {
|
|
||||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
|
||||||
}
|
}
|
||||||
|
out = addTimeout(out, p.Timeout)
|
||||||
|
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
@ -117,9 +132,9 @@ func getURLInitValues(comp compType, res resourceType) url.Values {
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetShareReference returns a Share object for the specified share name.
|
// GetShareReference returns a Share object for the specified share name.
|
||||||
func (f FileServiceClient) GetShareReference(name string) Share {
|
func (f *FileServiceClient) GetShareReference(name string) *Share {
|
||||||
return Share{
|
return &Share{
|
||||||
fsc: &f,
|
fsc: f,
|
||||||
Name: name,
|
Name: name,
|
||||||
Properties: ShareProperties{
|
Properties: ShareProperties{
|
||||||
Quota: -1,
|
Quota: -1,
|
||||||
|
@ -130,7 +145,7 @@ func (f FileServiceClient) GetShareReference(name string) Share {
|
||||||
// ListShares returns the list of shares in a storage account along with
|
// ListShares returns the list of shares in a storage account along with
|
||||||
// pagination token and other response details.
|
// pagination token and other response details.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares
|
||||||
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
|
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
|
||||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
||||||
|
|
||||||
|
@ -139,8 +154,8 @@ func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListRe
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
defer resp.Body.Close()
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
err = xmlUnmarshal(resp.Body, &out)
|
||||||
|
|
||||||
// assign our client to the newly created Share objects
|
// assign our client to the newly created Share objects
|
||||||
for i := range out.Shares {
|
for i := range out.Shares {
|
||||||
|
@ -164,7 +179,7 @@ func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// retrieves directory or share content
|
// retrieves directory or share content
|
||||||
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) {
|
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) {
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
if err := f.checkForStorageEmulator(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -178,8 +193,8 @@ func (f FileServiceClient) listContent(path string, params url.Values, extraHead
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
readAndCloseBody(resp.body)
|
drainRespBody(resp)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -197,9 +212,9 @@ func (f FileServiceClient) resourceExists(path string, res resourceType) (bool,
|
||||||
|
|
||||||
resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
|
resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
|
||||||
return resp.statusCode == http.StatusOK, resp.headers, nil
|
return resp.StatusCode == http.StatusOK, resp.Header, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false, nil, err
|
return false, nil, err
|
||||||
|
@ -211,12 +226,12 @@ func (f FileServiceClient) createResource(path string, res resourceType, urlPara
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes)
|
return resp.Header, checkRespCode(resp, expectedResponseCodes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// creates a resource depending on the specified resource type, doesn't close the response body
|
// creates a resource depending on the specified resource type, doesn't close the response body
|
||||||
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) {
|
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) {
|
||||||
if err := f.checkForStorageEmulator(); err != nil {
|
if err := f.checkForStorageEmulator(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -231,51 +246,50 @@ func (f FileServiceClient) createResourceNoClose(path string, res resourceType,
|
||||||
}
|
}
|
||||||
|
|
||||||
// returns HTTP header data for the specified directory or share
|
// returns HTTP header data for the specified directory or share
|
||||||
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) {
|
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) {
|
||||||
resp, err := f.getResourceNoClose(path, comp, res, verb, nil)
|
resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return resp.headers, nil
|
return resp.Header, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// gets the specified resource, doesn't close the response body
|
// gets the specified resource, doesn't close the response body
|
||||||
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) {
|
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) {
|
||||||
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

-	params := getURLInitValues(comp, res)
+	params = mergeParams(params, getURLInitValues(comp, res))
	uri := f.client.getEndpoint(fileServiceName, path, params)
-	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	return f.client.exec(verb, uri, headers, nil, f.auth)
}

// deletes the resource and returns the response
-func (f FileServiceClient) deleteResource(path string, res resourceType) error {
+func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error {
-	resp, err := f.deleteResourceNoClose(path, res)
+	resp, err := f.deleteResourceNoClose(path, res, options)
	if err != nil {
		return err
	}
-	defer readAndCloseBody(resp.body)
+	defer drainRespBody(resp)
-	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
+	return checkRespCode(resp, []int{http.StatusAccepted})
}

// deletes the resource and returns the response, doesn't close the response body
-func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) {
+func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

-	values := getURLInitValues(compNone, res)
+	values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options))
	uri := f.client.getEndpoint(fileServiceName, path, values)
	return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
}

@ -294,21 +308,13 @@ func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]strin
	return extraHeaders
}

-// merges extraHeaders into headers and returns headers
-func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
-	for k, v := range extraHeaders {
-		headers[k] = v
-	}
-	return headers
-}
-
// sets extra header data for the specified resource
-func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) {
+func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

-	params := getURLInitValues(comp, res)
+	params := mergeParams(getURLInitValues(comp, res), prepareOptions(options))
	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

@ -317,52 +323,9 @@ func (f FileServiceClient) setResourceHeaders(path string, comp compType, res re
	if err != nil {
		return nil, err
	}
-	defer readAndCloseBody(resp.body)
+	defer drainRespBody(resp)

-	return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
+	return resp.Header, checkRespCode(resp, []int{http.StatusOK})
-}
-
-// gets metadata for the specified resource
-func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) {
-	if err := f.checkForStorageEmulator(); err != nil {
-		return nil, err
-	}
-
-	headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet)
-	if err != nil {
-		return nil, err
-	}
-
-	return getMetadataFromHeaders(headers), nil
-}
-
-// returns a map of custom metadata values from the specified HTTP header
-func getMetadataFromHeaders(header http.Header) map[string]string {
-	metadata := make(map[string]string)
-	for k, v := range header {
-		// Can't trust CanonicalHeaderKey() to munge case
-		// reliably. "_" is allowed in identifiers:
-		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
-		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
-		// http://tools.ietf.org/html/rfc7230#section-3.2
-		// ...but "_" is considered invalid by
-		// CanonicalMIMEHeaderKey in
-		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
-		// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
-		k = strings.ToLower(k)
-		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
-			continue
-		}
-		// metadata["foo"] = content of the last X-Ms-Meta-Foo header
-		k = k[len(userDefinedMetadataHeaderPrefix):]
-		metadata[k] = v[len(v)-1]
-	}
-
-	if len(metadata) == 0 {
-		return nil
-	}
-
-	return metadata
}

//checkForStorageEmulator determines if the client is setup for use with
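
The file service helpers above now take a *FileRequestOptions and fold it into the query string via prepareOptions and mergeParams. Neither helper is shown in this hunk, so the following is only a minimal sketch, assuming FileRequestOptions carries just a server-side timeout; the real vendored definitions may differ.

// A minimal sketch of the options-threading pattern used above; the exact
// FileRequestOptions struct and prepareOptions helper live elsewhere in the
// vendored package, so the shapes here are assumptions.
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// FileRequestOptions mirrors the options parameter added to the file
// service helpers in this diff (assumed shape, timeout in seconds).
type FileRequestOptions struct {
	Timeout uint // 0 means "not set"
}

// prepareOptions converts the options into query parameters, the way the
// new helpers merge them into the request URL.
func prepareOptions(options *FileRequestOptions) url.Values {
	params := url.Values{}
	if options != nil && options.Timeout > 0 {
		params.Set("timeout", strconv.FormatUint(uint64(options.Timeout), 10))
	}
	return params
}

// mergeParams folds one set of query values into another, matching how the
// diff combines getURLInitValues(...) with prepareOptions(...).
func mergeParams(dst, src url.Values) url.Values {
	for k, vs := range src {
		for _, v := range vs {
			dst.Add(k, v)
		}
	}
	return dst
}

func main() {
	base := url.Values{"restype": {"share"}}
	merged := mergeParams(base, prepareOptions(&FileRequestOptions{Timeout: 30}))
	fmt.Println(merged.Encode()) // restype=share&timeout=30
}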
201 vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go (generated, vendored, new file)
@ -0,0 +1,201 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// lease constants.
|
||||||
|
const (
|
||||||
|
leaseHeaderPrefix = "x-ms-lease-"
|
||||||
|
headerLeaseID = "x-ms-lease-id"
|
||||||
|
leaseAction = "x-ms-lease-action"
|
||||||
|
leaseBreakPeriod = "x-ms-lease-break-period"
|
||||||
|
leaseDuration = "x-ms-lease-duration"
|
||||||
|
leaseProposedID = "x-ms-proposed-lease-id"
|
||||||
|
leaseTime = "x-ms-lease-time"
|
||||||
|
|
||||||
|
acquireLease = "acquire"
|
||||||
|
renewLease = "renew"
|
||||||
|
changeLease = "change"
|
||||||
|
releaseLease = "release"
|
||||||
|
breakLease = "break"
|
||||||
|
)
|
||||||
|
|
||||||
|
// leasePut is common PUT code for the various acquire/release/break etc functions.
|
||||||
|
func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
|
||||||
|
params := url.Values{"comp": {"lease"}}
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
|
||||||
|
if err := checkRespCode(resp, []int{expectedStatus}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp.Header, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseOptions includes options for all operations regarding leasing blobs
|
||||||
|
type LeaseOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
Origin string `header:"Origin"`
|
||||||
|
IfMatch string `header:"If-Match"`
|
||||||
|
IfNoneMatch string `header:"If-None-Match"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AcquireLease creates a lease for a blob
|
||||||
|
// returns leaseID acquired
|
||||||
|
// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
|
||||||
|
// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = acquireLease
|
||||||
|
|
||||||
|
if leaseTimeInSeconds == -1 {
|
||||||
|
// Do nothing, but don't trigger the following clauses.
|
||||||
|
} else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" {
|
||||||
|
leaseTimeInSeconds = 60
|
||||||
|
} else if leaseTimeInSeconds < 15 {
|
||||||
|
leaseTimeInSeconds = 15
|
||||||
|
}
|
||||||
|
|
||||||
|
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
|
||||||
|
|
||||||
|
if proposedLeaseID != "" {
|
||||||
|
headers[leaseProposedID] = proposedLeaseID
|
||||||
|
}
|
||||||
|
|
||||||
|
respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
|
||||||
|
|
||||||
|
if returnedLeaseID != "" {
|
||||||
|
return returnedLeaseID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", errors.New("LeaseID not returned")
|
||||||
|
}
|
||||||
|
|
||||||
|
// BreakLease breaks the lease for a blob
|
||||||
|
// Returns the timeout remaining in the lease in seconds
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = breakLease
|
||||||
|
return b.breakLeaseCommon(headers, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BreakLeaseWithBreakPeriod breaks the lease for a blob
|
||||||
|
// breakPeriodInSeconds is used to determine how long until new lease can be created.
|
||||||
|
// Returns the timeout remaining in the lease in seconds
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = breakLease
|
||||||
|
headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
|
||||||
|
return b.breakLeaseCommon(headers, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// breakLeaseCommon is common code for both version of BreakLease (with and without break period)
|
||||||
|
func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) {
|
||||||
|
|
||||||
|
respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
|
||||||
|
if breakTimeoutStr != "" {
|
||||||
|
breakTimeout, err = strconv.Atoi(breakTimeoutStr)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return breakTimeout, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChangeLease changes a lease ID for a blob
|
||||||
|
// Returns the new LeaseID acquired
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = changeLease
|
||||||
|
headers[headerLeaseID] = currentLeaseID
|
||||||
|
headers[leaseProposedID] = proposedLeaseID
|
||||||
|
|
||||||
|
respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
|
||||||
|
if newLeaseID != "" {
|
||||||
|
return newLeaseID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", errors.New("LeaseID not returned")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReleaseLease releases the lease for a blob
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
|
||||||
|
func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = releaseLease
|
||||||
|
headers[headerLeaseID] = currentLeaseID
|
||||||
|
|
||||||
|
_, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
|
||||||
|
func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error {
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers[leaseAction] = renewLease
|
||||||
|
headers[headerLeaseID] = currentLeaseID
|
||||||
|
|
||||||
|
_, err := b.leaseCommonPut(headers, http.StatusOK, options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
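
The new leaseblob.go above exposes lease management directly on *Blob. A hedged usage sketch follows; the NewBasicClient/GetBlobService call chain comes from the rest of the vendored package, and the account, key, container and blob names are placeholders.

// Acquire and release a blob lease with the API introduced above.
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder account and base64 key
	if err != nil {
		log.Fatal(err)
	}
	blob := client.GetBlobService().
		GetContainerReference("mycontainer").
		GetBlobReference("myblob")

	// Acquire an infinite lease (-1) with no proposed lease ID.
	leaseID, err := blob.AcquireLease(-1, "", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("lease acquired:", leaseID)

	// Release it again; nil uses default LeaseOptions.
	if err := blob.ReleaseLease(leaseID, nil); err != nil {
		log.Fatal(err)
	}
}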
170 vendor/github.com/Azure/azure-sdk-for-go/storage/message.go (generated, vendored, new file)
@ -0,0 +1,170 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Message represents an Azure message.
|
||||||
|
type Message struct {
|
||||||
|
Queue *Queue
|
||||||
|
Text string `xml:"MessageText"`
|
||||||
|
ID string `xml:"MessageId"`
|
||||||
|
Insertion TimeRFC1123 `xml:"InsertionTime"`
|
||||||
|
Expiration TimeRFC1123 `xml:"ExpirationTime"`
|
||||||
|
PopReceipt string `xml:"PopReceipt"`
|
||||||
|
NextVisible TimeRFC1123 `xml:"TimeNextVisible"`
|
||||||
|
DequeueCount int `xml:"DequeueCount"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) buildPath() string {
|
||||||
|
return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutMessageOptions is the set of options can be specified for Put Messsage
|
||||||
|
// operation. A zero struct does not use any preferences for the request.
|
||||||
|
type PutMessageOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
VisibilityTimeout int
|
||||||
|
MessageTTL int
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put operation adds a new message to the back of the message queue.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
|
||||||
|
func (m *Message) Put(options *PutMessageOptions) error {
|
||||||
|
query := url.Values{}
|
||||||
|
headers := m.Queue.qsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
req := putMessageRequest{MessageText: m.Text}
|
||||||
|
body, nn, err := xmlMarshal(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headers["Content-Length"] = strconv.Itoa(nn)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
if options.VisibilityTimeout != 0 {
|
||||||
|
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
|
||||||
|
}
|
||||||
|
if options.MessageTTL != 0 {
|
||||||
|
query.Set("messagettl", strconv.Itoa(options.MessageTTL))
|
||||||
|
}
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
|
||||||
|
resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
err = checkRespCode(resp, []int{http.StatusCreated})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = xmlUnmarshal(resp.Body, m)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMessageOptions is the set of options can be specified for Update Messsage
|
||||||
|
// operation. A zero struct does not use any preferences for the request.
|
||||||
|
type UpdateMessageOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
VisibilityTimeout int
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update operation updates the specified message.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
|
||||||
|
func (m *Message) Update(options *UpdateMessageOptions) error {
|
||||||
|
query := url.Values{}
|
||||||
|
if m.PopReceipt != "" {
|
||||||
|
query.Set("popreceipt", m.PopReceipt)
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := m.Queue.qsc.client.getStandardHeaders()
|
||||||
|
req := putMessageRequest{MessageText: m.Text}
|
||||||
|
body, nn, err := xmlMarshal(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headers["Content-Length"] = strconv.Itoa(nn)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
if options.VisibilityTimeout != 0 {
|
||||||
|
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
|
||||||
|
}
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)
|
||||||
|
|
||||||
|
resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
|
||||||
|
m.PopReceipt = resp.Header.Get("x-ms-popreceipt")
|
||||||
|
nextTimeStr := resp.Header.Get("x-ms-time-next-visible")
|
||||||
|
if nextTimeStr != "" {
|
||||||
|
nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.NextVisible = TimeRFC1123(nextTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete operation deletes the specified message.
|
||||||
|
//
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
|
||||||
|
func (m *Message) Delete(options *QueueServiceOptions) error {
|
||||||
|
params := url.Values{"popreceipt": {m.PopReceipt}}
|
||||||
|
headers := m.Queue.qsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
|
}
|
||||||
|
|
||||||
|
type putMessageRequest struct {
|
||||||
|
XMLName xml.Name `xml:"QueueMessage"`
|
||||||
|
MessageText string `xml:"MessageText"`
|
||||||
|
}
|
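
message.go moves queue messages to an object model where each Message keeps a pointer to its Queue. A short sketch, assuming the queue service client exposes GetQueueReference (defined elsewhere in the vendored package); all names are placeholders.

// Enqueue and delete a message through the new Message type.
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder credentials
	if err != nil {
		log.Fatal(err)
	}
	queue := client.GetQueueService().GetQueueReference("myqueue")

	// Enqueue a message with a 60 second TTL.
	msg := queue.GetMessageReference("hello world")
	if err := msg.Put(&storage.PutMessageOptions{MessageTTL: 60}); err != nil {
		log.Fatal(err)
	}

	// Put unmarshals the response into the message, so ID and PopReceipt
	// are populated and the same reference can be deleted afterwards.
	if err := msg.Delete(nil); err != nil {
		log.Fatal(err)
	}
}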
47 vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go (generated, vendored, new file)
@ -0,0 +1,47 @@
package storage

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// MetadataLevel determines if operations should return a paylod,
// and it level of detail.
type MetadataLevel string

// This consts are meant to help with Odata supported operations
const (
	OdataTypeSuffix = "@odata.type"

	// Types

	OdataBinary   = "Edm.Binary"
	OdataDateTime = "Edm.DateTime"
	OdataGUID     = "Edm.Guid"
	OdataInt64    = "Edm.Int64"

	// Query options

	OdataFilter  = "$filter"
	OdataOrderBy = "$orderby"
	OdataTop     = "$top"
	OdataSkip    = "$skip"
	OdataCount   = "$count"
	OdataExpand  = "$expand"
	OdataSelect  = "$select"
	OdataSearch  = "$search"

	EmptyPayload    MetadataLevel = ""
	NoMetadata      MetadataLevel = "application/json;odata=nometadata"
	MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
	FullMetadata    MetadataLevel = "application/json;odata=fullmetadata"
)
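
The OData constants above are plain strings, so they drop straight into net/url values; the sketch below only illustrates how the query-option names map to $-prefixed parameters.

// Assemble an OData query string from the exported constants.
package main

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	query := url.Values{}
	query.Set(storage.OdataFilter, "Age gt 30")
	query.Set(storage.OdataTop, "10")
	query.Set(storage.OdataSelect, "PartitionKey,RowKey,Age")
	fmt.Println(query.Encode())
	// %24filter=Age+gt+30&%24select=PartitionKey%2CRowKey%2CAge&%24top=10
}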
203 vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go (generated, vendored, new file)
@ -0,0 +1,203 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetPageRangesResponse contains the response fields from
|
||||||
|
// Get Page Ranges call.
|
||||||
|
//
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
|
||||||
|
type GetPageRangesResponse struct {
|
||||||
|
XMLName xml.Name `xml:"PageList"`
|
||||||
|
PageList []PageRange `xml:"PageRange"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PageRange contains information about a page of a page blob from
|
||||||
|
// Get Pages Range call.
|
||||||
|
//
|
||||||
|
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
|
||||||
|
type PageRange struct {
|
||||||
|
Start int64 `xml:"Start"`
|
||||||
|
End int64 `xml:"End"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
errBlobCopyAborted = errors.New("storage: blob copy is aborted")
|
||||||
|
errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
|
||||||
|
)
|
||||||
|
|
||||||
|
// PutPageOptions includes the options for a put page operation
|
||||||
|
type PutPageOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"`
|
||||||
|
IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"`
|
||||||
|
IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"`
|
||||||
|
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||||
|
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||||
|
IfMatch string `header:"If-Match"`
|
||||||
|
IfNoneMatch string `header:"If-None-Match"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteRange writes a range of pages to a page blob.
|
||||||
|
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
|
||||||
|
// multiplies by 512.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
|
||||||
|
func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
|
||||||
|
if bytes == nil {
|
||||||
|
return errors.New("bytes cannot be nil")
|
||||||
|
}
|
||||||
|
return b.modifyRange(blobRange, bytes, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearRange clears the given range in a page blob.
|
||||||
|
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
|
||||||
|
// multiplies by 512.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
|
||||||
|
func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
|
||||||
|
return b.modifyRange(blobRange, nil, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
|
||||||
|
if blobRange.End < blobRange.Start {
|
||||||
|
return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
|
||||||
|
}
|
||||||
|
if blobRange.Start%512 != 0 {
|
||||||
|
return errors.New("the value for rangeStart must be a multiple of 512")
|
||||||
|
}
|
||||||
|
if blobRange.End%512 != 511 {
|
||||||
|
return errors.New("the value for rangeEnd must be a multiple of 512 - 1")
|
||||||
|
}
|
||||||
|
|
||||||
|
params := url.Values{"comp": {"page"}}
|
||||||
|
|
||||||
|
// default to clear
|
||||||
|
write := "clear"
|
||||||
|
var cl uint64
|
||||||
|
|
||||||
|
// if bytes is not nil then this is an update operation
|
||||||
|
if bytes != nil {
|
||||||
|
write = "update"
|
||||||
|
cl = (blobRange.End - blobRange.Start) + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-blob-type"] = string(BlobTypePage)
|
||||||
|
headers["x-ms-page-write"] = write
|
||||||
|
headers["x-ms-range"] = blobRange.String()
|
||||||
|
headers["Content-Length"] = fmt.Sprintf("%v", cl)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusCreated})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPageRangesOptions includes the options for a get page ranges operation
|
||||||
|
type GetPageRangesOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
Snapshot *time.Time
|
||||||
|
PreviousSnapshot *time.Time
|
||||||
|
Range *BlobRange
|
||||||
|
LeaseID string `header:"x-ms-lease-id"`
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPageRanges returns the list of valid page ranges for a page blob.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
|
||||||
|
func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
|
||||||
|
params := url.Values{"comp": {"pagelist"}}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
params = addSnapshot(params, options.Snapshot)
|
||||||
|
if options.PreviousSnapshot != nil {
|
||||||
|
params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot))
|
||||||
|
}
|
||||||
|
if options.Range != nil {
|
||||||
|
headers["Range"] = options.Range.String()
|
||||||
|
}
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
var out GetPageRangesResponse
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
|
||||||
|
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
err = xmlUnmarshal(resp.Body, &out)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutPageBlob initializes an empty page blob with specified name and maximum
|
||||||
|
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
|
||||||
|
// be created using this method before writing pages.
|
||||||
|
//
|
||||||
|
// See CreateBlockBlobFromReader for more info on creating blobs.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
|
||||||
|
func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
|
||||||
|
if b.Properties.ContentLength%512 != 0 {
|
||||||
|
return errors.New("Content length must be aligned to a 512-byte boundary")
|
||||||
|
}
|
||||||
|
|
||||||
|
params := url.Values{}
|
||||||
|
headers := b.Container.bsc.client.getStandardHeaders()
|
||||||
|
headers["x-ms-blob-type"] = string(BlobTypePage)
|
||||||
|
headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength)
|
||||||
|
headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
||||||
|
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return b.respondCreation(resp, BlobTypePage)
|
||||||
|
}
|
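
pageblob.go enforces 512-byte alignment in modifyRange, so callers size the blob and the ranges accordingly. A minimal sketch, assuming BlobRange (defined elsewhere in the vendored package) and placeholder account and blob names.

// Create a page blob and write one 512-byte aligned range.
package main

import (
	"bytes"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder credentials
	if err != nil {
		log.Fatal(err)
	}
	blob := client.GetBlobService().
		GetContainerReference("mycontainer").
		GetBlobReference("mypageblob")

	// The blob size must be a multiple of 512 before PutPageBlob.
	blob.Properties.ContentLength = 4 * 512
	if err := blob.PutPageBlob(nil); err != nil {
		log.Fatal(err)
	}

	// Write the first page: Start must be a multiple of 512 and End a
	// multiple of 512 minus one, exactly as modifyRange validates above.
	page := make([]byte, 512)
	copy(page, []byte("hello page blob"))
	r := storage.BlobRange{Start: 0, End: 511}
	if err := blob.WriteRange(r, bytes.NewReader(page), nil); err != nil {
		log.Fatal(err)
	}
}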
603 vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go (generated, vendored)
@ -1,339 +1,436 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// casing is per Golang's http.Header canonicalizing the header names.
|
// casing is per Golang's http.Header canonicalizing the header names.
|
||||||
approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
|
approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
|
||||||
userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) }
|
// QueueAccessPolicy represents each access policy in the queue ACL.
|
||||||
func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) }
|
type QueueAccessPolicy struct {
|
||||||
func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) }
|
ID string
|
||||||
|
StartTime time.Time
|
||||||
type putMessageRequest struct {
|
ExpiryTime time.Time
|
||||||
XMLName xml.Name `xml:"QueueMessage"`
|
CanRead bool
|
||||||
MessageText string `xml:"MessageText"`
|
CanAdd bool
|
||||||
|
CanUpdate bool
|
||||||
|
CanProcess bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutMessageParameters is the set of options can be specified for Put Messsage
|
// QueuePermissions represents the queue ACLs.
|
||||||
// operation. A zero struct does not use any preferences for the request.
|
type QueuePermissions struct {
|
||||||
type PutMessageParameters struct {
|
AccessPolicies []QueueAccessPolicy
|
||||||
VisibilityTimeout int
|
|
||||||
MessageTTL int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p PutMessageParameters) getParameters() url.Values {
|
// SetQueuePermissionOptions includes options for a set queue permissions operation
|
||||||
out := url.Values{}
|
type SetQueuePermissionOptions struct {
|
||||||
if p.VisibilityTimeout != 0 {
|
Timeout uint
|
||||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
}
|
|
||||||
if p.MessageTTL != 0 {
|
|
||||||
out.Set("messagettl", strconv.Itoa(p.MessageTTL))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMessagesParameters is the set of options can be specified for Get
|
// Queue represents an Azure queue.
|
||||||
// Messsages operation. A zero struct does not use any preferences for the
|
type Queue struct {
|
||||||
// request.
|
qsc *QueueServiceClient
|
||||||
type GetMessagesParameters struct {
|
Name string
|
||||||
NumOfMessages int
|
Metadata map[string]string
|
||||||
VisibilityTimeout int
|
AproxMessageCount uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p GetMessagesParameters) getParameters() url.Values {
|
func (q *Queue) buildPath() string {
|
||||||
out := url.Values{}
|
return fmt.Sprintf("/%s", q.Name)
|
||||||
if p.NumOfMessages != 0 {
|
|
||||||
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
|
|
||||||
}
|
|
||||||
if p.VisibilityTimeout != 0 {
|
|
||||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// PeekMessagesParameters is the set of options can be specified for Peek
|
func (q *Queue) buildPathMessages() string {
|
||||||
// Messsage operation. A zero struct does not use any preferences for the
|
return fmt.Sprintf("%s/messages", q.buildPath())
|
||||||
// request.
|
|
||||||
type PeekMessagesParameters struct {
|
|
||||||
NumOfMessages int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p PeekMessagesParameters) getParameters() url.Values {
|
// QueueServiceOptions includes options for some queue service operations
|
||||||
out := url.Values{"peekonly": {"true"}} // Required for peek operation
|
type QueueServiceOptions struct {
|
||||||
if p.NumOfMessages != 0 {
|
Timeout uint
|
||||||
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateMessageParameters is the set of options can be specified for Update Messsage
|
// Create operation creates a queue under the given account.
|
||||||
// operation. A zero struct does not use any preferences for the request.
|
|
||||||
type UpdateMessageParameters struct {
|
|
||||||
PopReceipt string
|
|
||||||
VisibilityTimeout int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p UpdateMessageParameters) getParameters() url.Values {
|
|
||||||
out := url.Values{}
|
|
||||||
if p.PopReceipt != "" {
|
|
||||||
out.Set("popreceipt", p.PopReceipt)
|
|
||||||
}
|
|
||||||
if p.VisibilityTimeout != 0 {
|
|
||||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessagesResponse represents a response returned from Get Messages
|
|
||||||
// operation.
|
|
||||||
type GetMessagesResponse struct {
|
|
||||||
XMLName xml.Name `xml:"QueueMessagesList"`
|
|
||||||
QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMessageResponse represents a QueueMessage object returned from Get
|
|
||||||
// Messages operation response.
|
|
||||||
type GetMessageResponse struct {
|
|
||||||
MessageID string `xml:"MessageId"`
|
|
||||||
InsertionTime string `xml:"InsertionTime"`
|
|
||||||
ExpirationTime string `xml:"ExpirationTime"`
|
|
||||||
PopReceipt string `xml:"PopReceipt"`
|
|
||||||
TimeNextVisible string `xml:"TimeNextVisible"`
|
|
||||||
DequeueCount int `xml:"DequeueCount"`
|
|
||||||
MessageText string `xml:"MessageText"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMessagesResponse represents a response returned from Get Messages
|
|
||||||
// operation.
|
|
||||||
type PeekMessagesResponse struct {
|
|
||||||
XMLName xml.Name `xml:"QueueMessagesList"`
|
|
||||||
QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekMessageResponse represents a QueueMessage object returned from Peek
|
|
||||||
// Messages operation response.
|
|
||||||
type PeekMessageResponse struct {
|
|
||||||
MessageID string `xml:"MessageId"`
|
|
||||||
InsertionTime string `xml:"InsertionTime"`
|
|
||||||
ExpirationTime string `xml:"ExpirationTime"`
|
|
||||||
DequeueCount int `xml:"DequeueCount"`
|
|
||||||
MessageText string `xml:"MessageText"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueueMetadataResponse represents user defined metadata and queue
|
|
||||||
// properties on a specific queue.
|
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
|
||||||
type QueueMetadataResponse struct {
|
func (q *Queue) Create(options *QueueServiceOptions) error {
|
||||||
ApproximateMessageCount int
|
params := url.Values{}
|
||||||
UserDefinedMetadata map[string]string
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
|
headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusCreated})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete operation permanently deletes the specified queue.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
|
||||||
|
func (q *Queue) Delete(options *QueueServiceOptions) error {
|
||||||
|
params := url.Values{}
|
||||||
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exists returns true if a queue with given name exists.
|
||||||
|
func (q *Queue) Exists() (bool, error) {
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
|
||||||
|
if resp != nil {
|
||||||
|
defer drainRespBody(resp)
|
||||||
|
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
|
||||||
|
return resp.StatusCode == http.StatusOK, nil
|
||||||
|
}
|
||||||
|
err = getErrorFromResponse(resp)
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
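
With the rewrite above, queue operations hang off a *Queue value instead of taking the queue name as a string argument. A hedged sketch of Create and Exists, assuming GetQueueReference on the queue service client; names are placeholders.

// Create a queue and check that it exists using the new object model.
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder credentials
	if err != nil {
		log.Fatal(err)
	}
	q := client.GetQueueService().GetQueueReference("myqueue")

	// Create with a 30 second server-side timeout.
	if err := q.Create(&storage.QueueServiceOptions{Timeout: 30}); err != nil {
		log.Fatal(err)
	}

	ok, err := q.Exists()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("queue exists:", ok)
}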
// SetMetadata operation sets user-defined metadata on the specified queue.
|
// SetMetadata operation sets user-defined metadata on the specified queue.
|
||||||
// Metadata is associated with the queue as name-value pairs.
|
// Metadata is associated with the queue as name-value pairs.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
|
||||||
func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error {
|
func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
|
params := url.Values{"comp": {"metadata"}}
|
||||||
metadata = c.client.protectUserAgent(metadata)
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
headers := c.client.getStandardHeaders()
|
headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
|
||||||
for k, v := range metadata {
|
|
||||||
headers[userDefinedMetadataHeaderPrefix+k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
||||||
|
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMetadata operation retrieves user-defined metadata and queue
|
// GetMetadata operation retrieves user-defined metadata and queue
|
||||||
// properties on the specified queue. Metadata is associated with
|
// properties on the specified queue. Metadata is associated with
|
||||||
// the queue as name-values pairs.
|
// the queue as name-values pairs.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
|
||||||
//
|
//
|
||||||
// Because the way Golang's http client (and http.Header in particular)
|
// Because the way Golang's http client (and http.Header in particular)
|
||||||
// canonicalize header names, the returned metadata names would always
|
// canonicalize header names, the returned metadata names would always
|
||||||
// be all lower case.
|
// be all lower case.
|
||||||
func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) {
|
func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
|
||||||
qm := QueueMetadataResponse{}
|
params := url.Values{"comp": {"metadata"}}
|
||||||
qm.UserDefinedMetadata = make(map[string]string)
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
|
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return qm, err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
|
|
||||||
for k, v := range resp.headers {
|
if options != nil {
|
||||||
if len(v) != 1 {
|
params = addTimeout(params, options.Timeout)
|
||||||
return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k)
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
}
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
|
||||||
value := v[0]
|
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
||||||
if k == approximateMessagesCountHeader {
|
if err != nil {
|
||||||
qm.ApproximateMessageCount, err = strconv.Atoi(value)
|
return err
|
||||||
if err != nil {
|
}
|
||||||
return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value)
|
defer drainRespBody(resp)
|
||||||
}
|
|
||||||
} else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) {
|
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix)
|
return err
|
||||||
qm.UserDefinedMetadata[strings.ToLower(name)] = value
|
}
|
||||||
|
|
||||||
|
aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
|
||||||
|
if aproxMessagesStr != "" {
|
||||||
|
aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
q.AproxMessageCount = aproxMessages
|
||||||
}
|
}
|
||||||
|
|
||||||
return qm, checkRespCode(resp.statusCode, []int{http.StatusOK})
|
q.Metadata = getMetadataFromHeaders(resp.Header)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
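
GetMetadata now writes into the Queue value itself (Metadata, AproxMessageCount) instead of returning a QueueMetadataResponse. A small package-style sketch, reusing a *storage.Queue obtained as in the previous example:

package queuedemo

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// PrintQueueStats refreshes the queue's metadata and prints it; the
// reworked GetMetadata populates q.Metadata and q.AproxMessageCount.
func PrintQueueStats(q *storage.Queue) error {
	if err := q.GetMetadata(nil); err != nil {
		return err
	}
	fmt.Println("approximate message count:", q.AproxMessageCount)
	for k, v := range q.Metadata {
		fmt.Printf("metadata %s=%s\n", k, v)
	}
	return nil
}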
// CreateQueue operation creates a queue under the given account.
|
// GetMessageReference returns a message object with the specified text.
|
||||||
//
|
func (q *Queue) GetMessageReference(text string) *Message {
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
|
return &Message{
|
||||||
func (c QueueServiceClient) CreateQueue(name string) error {
|
Queue: q,
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
Text: text,
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteQueue operation permanently deletes the specified queue.
|
// GetMessagesOptions is the set of options can be specified for Get
|
||||||
//
|
// Messsages operation. A zero struct does not use any preferences for the
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
|
// request.
|
||||||
func (c QueueServiceClient) DeleteQueue(name string) error {
|
type GetMessagesOptions struct {
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
Timeout uint
|
||||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
NumOfMessages int
|
||||||
if err != nil {
|
VisibilityTimeout int
|
||||||
return err
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// QueueExists returns true if a queue with given name exists.
|
type messages struct {
|
||||||
func (c QueueServiceClient) QueueExists(name string) (bool, error) {
|
XMLName xml.Name `xml:"QueueMessagesList"`
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
|
Messages []Message `xml:"QueueMessage"`
|
||||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
|
|
||||||
return resp.statusCode == http.StatusOK, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutMessage operation adds a new message to the back of the message queue.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
|
|
||||||
func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
|
||||||
req := putMessageRequest{MessageText: message}
|
|
||||||
body, nn, err := xmlMarshal(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
headers["Content-Length"] = strconv.Itoa(nn)
|
|
||||||
resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearMessages operation deletes all messages from the specified queue.
|
|
||||||
//
|
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
|
|
||||||
func (c QueueServiceClient) ClearMessages(queue string) error {
|
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
|
|
||||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer readAndCloseBody(resp.body)
|
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMessages operation retrieves one or more messages from the front of the
|
// GetMessages operation retrieves one or more messages from the front of the
|
||||||
// queue.
|
// queue.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
|
||||||
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
|
func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
|
||||||
var r GetMessagesResponse
|
query := url.Values{}
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if err != nil {
|
if options != nil {
|
||||||
return r, err
|
if options.NumOfMessages != 0 {
|
||||||
|
query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
|
||||||
|
}
|
||||||
|
if options.VisibilityTimeout != 0 {
|
||||||
|
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
|
||||||
|
}
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
|
||||||
err = xmlUnmarshal(resp.body, &r)
|
|
||||||
return r, err
|
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return []Message{}, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out messages
|
||||||
|
err = xmlUnmarshal(resp.Body, &out)
|
||||||
|
if err != nil {
|
||||||
|
return []Message{}, err
|
||||||
|
}
|
||||||
|
for i := range out.Messages {
|
||||||
|
out.Messages[i].Queue = q
|
||||||
|
}
|
||||||
|
return out.Messages, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// PeekMessagesOptions is the set of options can be specified for Peek
|
||||||
|
// Messsage operation. A zero struct does not use any preferences for the
|
||||||
|
// request.
|
||||||
|
type PeekMessagesOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
NumOfMessages int
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PeekMessages retrieves one or more messages from the front of the queue, but
|
// PeekMessages retrieves one or more messages from the front of the queue, but
|
||||||
// does not alter the visibility of the message.
|
// does not alter the visibility of the message.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
|
||||||
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
|
func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
|
||||||
var r PeekMessagesResponse
|
query := url.Values{"peekonly": {"true"}} // Required for peek operation
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
if err != nil {
|
if options != nil {
|
||||||
return r, err
|
if options.NumOfMessages != 0 {
|
||||||
|
query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
|
||||||
|
}
|
||||||
|
query = addTimeout(query, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
|
||||||
err = xmlUnmarshal(resp.body, &r)
|
|
||||||
return r, err
|
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return []Message{}, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out messages
|
||||||
|
err = xmlUnmarshal(resp.Body, &out)
|
||||||
|
if err != nil {
|
||||||
|
return []Message{}, err
|
||||||
|
}
|
||||||
|
for i := range out.Messages {
|
||||||
|
out.Messages[i].Queue = q
|
||||||
|
}
|
||||||
|
return out.Messages, err
|
||||||
}
|
}
|
||||||
|
|
||||||
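
GetMessages and PeekMessages now return []Message with each message wired back to its queue, so a fetched message can be deleted directly. A hedged sketch using the GetMessagesOptions shown above:

package queuedemo

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// DrainFive fetches up to five messages, hides them for 30 seconds, and
// deletes each one after printing its text.
func DrainFive(q *storage.Queue) error {
	msgs, err := q.GetMessages(&storage.GetMessagesOptions{
		NumOfMessages:     5,
		VisibilityTimeout: 30,
	})
	if err != nil {
		return err
	}
	for i := range msgs {
		fmt.Println("got:", msgs[i].Text)
		if err := msgs[i].Delete(nil); err != nil {
			return err
		}
	}
	return nil
}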
// DeleteMessage operation deletes the specified message.
|
// ClearMessages operation deletes all messages from the specified queue.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages
|
||||||
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
|
func (q *Queue) ClearMessages(options *QueueServiceOptions) error {
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
|
params := url.Values{}
|
||||||
"popreceipt": {popReceipt}})
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params)
|
||||||
|
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateMessage operation deletes the specified message.
|
// SetPermissions sets up queue permissions
|
||||||
//
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx
|
func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error {
|
||||||
func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error {
|
body, length, err := generateQueueACLpayload(permissions.AccessPolicies)
|
||||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters())
|
|
||||||
req := putMessageRequest{MessageText: message}
|
|
||||||
body, nn, err := xmlMarshal(req)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
headers := c.client.getStandardHeaders()
|
|
||||||
headers["Content-Length"] = fmt.Sprintf("%d", nn)
|
params := url.Values{
|
||||||
resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth)
|
"comp": {"acl"},
|
||||||
|
}
|
||||||
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
|
headers["Content-Length"] = strconv.Itoa(length)
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) {
|
||||||
|
sil := SignedIdentifiers{
|
||||||
|
SignedIdentifiers: []SignedIdentifier{},
|
||||||
|
}
|
||||||
|
for _, qapd := range policies {
|
||||||
|
permission := qapd.generateQueuePermissions()
|
||||||
|
signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission)
|
||||||
|
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
||||||
|
}
|
||||||
|
return xmlMarshal(sil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) {
|
||||||
|
// generate the permissions string (raup).
|
||||||
|
// still want the end user API to have bool flags.
|
||||||
|
permissions = ""
|
||||||
|
|
||||||
|
if qapd.CanRead {
|
||||||
|
permissions += "r"
|
||||||
|
}
|
||||||
|
|
||||||
|
if qapd.CanAdd {
|
||||||
|
permissions += "a"
|
||||||
|
}
|
||||||
|
|
||||||
|
if qapd.CanUpdate {
|
||||||
|
permissions += "u"
|
||||||
|
}
|
||||||
|
|
||||||
|
if qapd.CanProcess {
|
||||||
|
permissions += "p"
|
||||||
|
}
|
||||||
|
|
||||||
|
return permissions
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetQueuePermissionOptions includes options for a get queue permissions operation
|
||||||
|
type GetQueuePermissionOptions struct {
|
||||||
|
Timeout uint
|
||||||
|
RequestID string `header:"x-ms-client-request-id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
|
||||||
|
// If timeout is 0 then it will not be passed to Azure
|
||||||
|
func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
|
||||||
|
params := url.Values{
|
||||||
|
"comp": {"acl"},
|
||||||
|
}
|
||||||
|
headers := q.qsc.client.getStandardHeaders()
|
||||||
|
|
||||||
|
if options != nil {
|
||||||
|
params = addTimeout(params, options.Timeout)
|
||||||
|
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||||
|
}
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
|
||||||
|
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var ap AccessPolicy
|
||||||
|
err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buildQueueAccessPolicy(ap, &resp.Header), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
|
||||||
|
permissions := QueuePermissions{
|
||||||
|
AccessPolicies: []QueueAccessPolicy{},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
||||||
|
qapd := QueueAccessPolicy{
|
||||||
|
ID: policy.ID,
|
||||||
|
StartTime: policy.AccessPolicy.StartTime,
|
||||||
|
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
||||||
|
}
|
||||||
|
qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
||||||
|
qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a")
|
||||||
|
qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
|
||||||
|
qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p")
|
||||||
|
|
||||||
|
permissions.AccessPolicies = append(permissions.AccessPolicies, qapd)
|
||||||
|
}
|
||||||
|
return &permissions
|
||||||
}
|
}
|
||||||
|
|
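For reference, a minimal usage sketch of the reworked Queue permission helpers in this hunk. It is not part of the vendored code; NewBasicClient, GetQueueService and GetQueueReference are assumed from the rest of the package, and the account and queue names are placeholders.

// Sketch only; imports: "time", "github.com/Azure/azure-sdk-for-go/storage".
func exampleQueuePermissions(accountName, accountKey string) error {
	client, err := storage.NewBasicClient(accountName, accountKey) // placeholder credentials
	if err != nil {
		return err
	}
	qsc := client.GetQueueService()
	queue := qsc.GetQueueReference("example-queue") // hypothetical queue name

	// Grant read+process access for 48 hours through a stored access policy ("rp").
	perms := storage.QueuePermissions{
		AccessPolicies: []storage.QueueAccessPolicy{{
			ID:         "readers",
			StartTime:  time.Now().UTC(),
			ExpiryTime: time.Now().UTC().Add(48 * time.Hour),
			CanRead:    true,
			CanProcess: true,
		}},
	}
	if err := queue.SetPermissions(perms, nil); err != nil {
		return err
	}

	// Read the ACL back; CanRead/CanAdd/CanUpdate/CanProcess are decoded from the "raup" string.
	_, err = queue.GetPermissions(nil)
	return err
}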
146
vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
generated
vendored
Normal file
|
@ -0,0 +1,146 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// QueueSASOptions are options to construct a queue SAS
|
||||||
|
// URI.
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||||
|
type QueueSASOptions struct {
|
||||||
|
QueueSASPermissions
|
||||||
|
SASOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueueSASPermissions includes the available permissions for
|
||||||
|
// a queue SAS URI.
|
||||||
|
type QueueSASPermissions struct {
|
||||||
|
Read bool
|
||||||
|
Add bool
|
||||||
|
Update bool
|
||||||
|
Process bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q QueueSASPermissions) buildString() string {
|
||||||
|
permissions := ""
|
||||||
|
|
||||||
|
if q.Read {
|
||||||
|
permissions += "r"
|
||||||
|
}
|
||||||
|
if q.Add {
|
||||||
|
permissions += "a"
|
||||||
|
}
|
||||||
|
if q.Update {
|
||||||
|
permissions += "u"
|
||||||
|
}
|
||||||
|
if q.Process {
|
||||||
|
permissions += "p"
|
||||||
|
}
|
||||||
|
return permissions
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSASURI creates a URL to the specified queue which contains the Shared
|
||||||
|
// Access Signature with specified permissions and expiration time.
|
||||||
|
//
|
||||||
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
|
||||||
|
func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
|
||||||
|
canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
|
||||||
|
// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
|
||||||
|
// later, the storage account name, and the resource name, and must be URL-decoded.
|
||||||
|
// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
||||||
|
// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
|
||||||
|
canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
|
||||||
|
canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
signedStart := ""
|
||||||
|
if options.Start != (time.Time{}) {
|
||||||
|
signedStart = options.Start.UTC().Format(time.RFC3339)
|
||||||
|
}
|
||||||
|
signedExpiry := options.Expiry.UTC().Format(time.RFC3339)
|
||||||
|
|
||||||
|
protocols := "https,http"
|
||||||
|
if options.UseHTTPS {
|
||||||
|
protocols = "https"
|
||||||
|
}
|
||||||
|
|
||||||
|
permissions := options.QueueSASPermissions.buildString()
|
||||||
|
stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
sig := q.qsc.client.computeHmac256(stringToSign)
|
||||||
|
sasParams := url.Values{
|
||||||
|
"sv": {q.qsc.client.apiVersion},
|
||||||
|
"se": {signedExpiry},
|
||||||
|
"sp": {permissions},
|
||||||
|
"sig": {sig},
|
||||||
|
}
|
||||||
|
|
||||||
|
if q.qsc.client.apiVersion >= "2015-04-05" {
|
||||||
|
sasParams.Add("spr", protocols)
|
||||||
|
addQueryParameter(sasParams, "sip", options.IP)
|
||||||
|
}
|
||||||
|
|
||||||
|
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
|
||||||
|
sasURL, err := url.Parse(uri)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
sasURL.RawQuery = sasParams.Encode()
|
||||||
|
return sasURL.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
|
||||||
|
|
||||||
|
if signedVersion >= "2015-02-21" {
|
||||||
|
canonicalizedResource = "/queue" + canonicalizedResource
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
|
||||||
|
if signedVersion >= "2015-04-05" {
|
||||||
|
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
|
||||||
|
signedPermissions,
|
||||||
|
signedStart,
|
||||||
|
signedExpiry,
|
||||||
|
canonicalizedResource,
|
||||||
|
signedIdentifier,
|
||||||
|
signedIP,
|
||||||
|
protocols,
|
||||||
|
signedVersion), nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
||||||
|
if signedVersion >= "2013-08-15" {
|
||||||
|
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
|
||||||
|
}
|
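For reference, a sketch of generating a queue SAS with the new GetSASURI above; not part of the vendored file. Read/Process come from the embedded QueueSASPermissions and Expiry/UseHTTPS from the embedded SASOptions, so they are set directly on QueueSASOptions.

// Sketch only; imports: "time", "github.com/Azure/azure-sdk-for-go/storage".
func exampleQueueSAS(queue *storage.Queue) (string, error) {
	opts := storage.QueueSASOptions{}
	opts.Read = true                              // contributes "r" to sp
	opts.Process = true                           // contributes "p" to sp
	opts.Expiry = time.Now().UTC().Add(time.Hour) // becomes the se parameter
	opts.UseHTTPS = true                          // spr=https for API versions >= 2015-04-05
	return queue.GetSASURI(opts)
}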
30
vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
generated
vendored
|
@ -1,5 +1,19 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
|
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
|
||||||
// Service.
|
// Service.
|
||||||
type QueueServiceClient struct {
|
type QueueServiceClient struct {
|
||||||
|
@ -9,12 +23,20 @@ type QueueServiceClient struct {
|
||||||
|
|
||||||
// GetServiceProperties gets the properties of your storage account's queue service.
|
// GetServiceProperties gets the properties of your storage account's queue service.
|
||||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
|
||||||
func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
||||||
return c.client.getServiceProperties(queueServiceName, c.auth)
|
return q.client.getServiceProperties(queueServiceName, q.auth)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetServiceProperties sets the properties of your storage account's queue service.
|
// SetServiceProperties sets the properties of your storage account's queue service.
|
||||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
|
||||||
func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
|
func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
|
||||||
return c.client.setServiceProperties(props, queueServiceName, c.auth)
|
return q.client.setServiceProperties(props, queueServiceName, q.auth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetQueueReference returns a Queue object for the specified queue name.
|
||||||
|
func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
|
||||||
|
return &Queue{
|
||||||
|
qsc: q,
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
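For reference, a sketch of the extended QueueServiceClient; not part of the vendored file. GetQueueService is assumed from the rest of the package, and the queue name is a placeholder.

// Sketch only; import "github.com/Azure/azure-sdk-for-go/storage".
func exampleQueueService(client storage.Client) error {
	qsc := client.GetQueueService()

	// The new constructor-style accessor replaces building queue paths by hand.
	queue := qsc.GetQueueReference("example-queue") // hypothetical name
	_ = queue

	// Round-trip the queue service properties unchanged.
	props, err := qsc.GetServiceProperties()
	if err != nil {
		return err
	}
	return qsc.SetServiceProperties(*props)
}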
94
vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
generated
vendored
|
@ -1,5 +1,19 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -30,9 +44,15 @@ func (s *Share) buildPath() string {
|
||||||
// Create this share under the associated account.
|
// Create this share under the associated account.
|
||||||
// If a share with the same name already exists, the operation fails.
|
// If a share with the same name already exists, the operation fails.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
|
||||||
func (s *Share) Create() error {
|
func (s *Share) Create(options *FileRequestOptions) error {
|
||||||
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated})
|
extraheaders := map[string]string{}
|
||||||
|
if s.Properties.Quota > 0 {
|
||||||
|
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
|
||||||
|
}
|
||||||
|
|
||||||
|
params := prepareOptions(options)
|
||||||
|
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -45,17 +65,23 @@ func (s *Share) Create() error {
|
||||||
// it does not exist. Returns true if the share is newly created or false if
|
// it does not exist. Returns true if the share is newly created or false if
|
||||||
// the share already exists.
|
// the share already exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
|
||||||
func (s *Share) CreateIfNotExists() (bool, error) {
|
func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
|
||||||
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil)
|
extraheaders := map[string]string{}
|
||||||
|
if s.Properties.Quota > 0 {
|
||||||
|
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
|
||||||
|
}
|
||||||
|
|
||||||
|
params := prepareOptions(options)
|
||||||
|
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
|
||||||
if resp.statusCode == http.StatusCreated {
|
if resp.StatusCode == http.StatusCreated {
|
||||||
s.updateEtagAndLastModified(resp.headers)
|
s.updateEtagAndLastModified(resp.Header)
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
return false, s.FetchAttributes()
|
return false, s.FetchAttributes(nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -66,20 +92,20 @@ func (s *Share) CreateIfNotExists() (bool, error) {
|
||||||
// and directories contained within it are later deleted during garbage
|
// and directories contained within it are later deleted during garbage
|
||||||
// collection. If the share does not exist the operation fails
|
// collection. If the share does not exist the operation fails
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
|
||||||
func (s *Share) Delete() error {
|
func (s *Share) Delete(options *FileRequestOptions) error {
|
||||||
return s.fsc.deleteResource(s.buildPath(), resourceShare)
|
return s.fsc.deleteResource(s.buildPath(), resourceShare, options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteIfExists operation marks this share for deletion if it exists.
|
// DeleteIfExists operation marks this share for deletion if it exists.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
|
||||||
func (s *Share) DeleteIfExists() (bool, error) {
|
func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) {
|
||||||
resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare)
|
resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options)
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
|
||||||
return resp.statusCode == http.StatusAccepted, nil
|
return resp.StatusCode == http.StatusAccepted, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false, err
|
return false, err
|
||||||
|
@ -97,8 +123,10 @@ func (s *Share) Exists() (bool, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// FetchAttributes retrieves metadata and properties for this share.
|
// FetchAttributes retrieves metadata and properties for this share.
|
||||||
func (s *Share) FetchAttributes() error {
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties
|
||||||
headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead)
|
func (s *Share) FetchAttributes(options *FileRequestOptions) error {
|
||||||
|
params := prepareOptions(options)
|
||||||
|
headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -130,9 +158,9 @@ func (s *Share) ServiceClient() *FileServiceClient {
|
||||||
// are case-insensitive so case munging should not matter to other
|
// are case-insensitive so case munging should not matter to other
|
||||||
// applications either.
|
// applications either.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata
|
||||||
func (s *Share) SetMetadata() error {
|
func (s *Share) SetMetadata(options *FileRequestOptions) error {
|
||||||
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil))
|
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -148,15 +176,17 @@ func (s *Share) SetMetadata() error {
|
||||||
// are case-insensitive so case munging should not matter to other
|
// are case-insensitive so case munging should not matter to other
|
||||||
// applications either.
|
// applications either.
|
||||||
//
|
//
|
||||||
// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
|
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties
|
||||||
func (s *Share) SetProperties() error {
|
func (s *Share) SetProperties(options *FileRequestOptions) error {
|
||||||
if s.Properties.Quota < 1 || s.Properties.Quota > 5120 {
|
extraheaders := map[string]string{}
|
||||||
return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
|
if s.Properties.Quota > 0 {
|
||||||
|
if s.Properties.Quota > 5120 {
|
||||||
|
return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
|
||||||
|
}
|
||||||
|
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
|
||||||
}
|
}
|
||||||
|
|
||||||
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{
|
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options)
|
||||||
"x-ms-share-quota": strconv.Itoa(s.Properties.Quota),
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
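For reference, a sketch of the updated Share API, which now threads *FileRequestOptions through its methods and only sends x-ms-share-quota when Properties.Quota is greater than zero. Not part of the vendored file; GetFileService and GetShareReference are assumed from the rest of the package, and the share name is a placeholder.

// Sketch only; import "github.com/Azure/azure-sdk-for-go/storage".
func exampleShare(client storage.Client) error {
	fsc := client.GetFileService()
	share := fsc.GetShareReference("example-share") // hypothetical name
	share.Properties.Quota = 10                     // GiB; emitted as x-ms-share-quota because it is > 0

	created, err := share.CreateIfNotExists(nil) // nil *FileRequestOptions keeps the defaults
	if err != nil {
		return err
	}
	if !created {
		// The share already existed; CreateIfNotExists fell back to FetchAttributes(nil).
	}
	return share.SetMetadata(nil)
}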
14
vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
generated
vendored
|
@ -1,5 +1,19 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
29
vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
generated
vendored
|
@ -1,9 +1,23 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ServiceProperties represents the storage account service properties
|
// ServiceProperties represents the storage account service properties
|
||||||
|
@ -63,14 +77,14 @@ func (c Client) getServiceProperties(service string, auth authentication) (*Serv
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var out ServiceProperties
|
var out ServiceProperties
|
||||||
err = xmlUnmarshal(resp.body, &out)
|
err = xmlUnmarshal(resp.Body, &out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -106,13 +120,12 @@ func (c Client) setServiceProperties(props ServiceProperties, service string, au
|
||||||
}
|
}
|
||||||
|
|
||||||
headers := c.getStandardHeaders()
|
headers := c.getStandardHeaders()
|
||||||
headers["Content-Length"] = fmt.Sprintf("%v", length)
|
headers["Content-Length"] = strconv.Itoa(length)
|
||||||
|
|
||||||
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
|
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
|
return checkRespCode(resp, []int{http.StatusAccepted})
|
||||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
|
||||||
}
|
}
|
||||||
|
|
385
vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
generated
vendored
|
@ -1,5 +1,19 @@
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
@ -9,20 +23,19 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AzureTable is the typedef of the Azure Table name
|
|
||||||
type AzureTable string
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
tablesURIPath = "/Tables"
|
tablesURIPath = "/Tables"
|
||||||
|
nextTableQueryParameter = "NextTableName"
|
||||||
|
headerNextPartitionKey = "x-ms-continuation-NextPartitionKey"
|
||||||
|
headerNextRowKey = "x-ms-continuation-NextRowKey"
|
||||||
|
nextPartitionKeyQueryParameter = "NextPartitionKey"
|
||||||
|
nextRowKeyQueryParameter = "NextRowKey"
|
||||||
)
|
)
|
||||||
|
|
||||||
type createTableRequest struct {
|
|
||||||
TableName string `json:"TableName"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TableAccessPolicy are used for SETTING table policies
|
// TableAccessPolicy are used for SETTING table policies
|
||||||
type TableAccessPolicy struct {
|
type TableAccessPolicy struct {
|
||||||
ID string
|
ID string
|
||||||
|
@ -34,140 +47,231 @@ type TableAccessPolicy struct {
|
||||||
CanDelete bool
|
CanDelete bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) }
|
// Table represents an Azure table.
|
||||||
|
type Table struct {
|
||||||
func (c *TableServiceClient) getStandardHeaders() map[string]string {
|
tsc *TableServiceClient
|
||||||
return map[string]string{
|
Name string `json:"TableName"`
|
||||||
"x-ms-version": "2015-02-21",
|
OdataEditLink string `json:"odata.editLink"`
|
||||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
OdataID string `json:"odata.id"`
|
||||||
"Accept": "application/json;odata=nometadata",
|
OdataMetadata string `json:"odata.metadata"`
|
||||||
"Accept-Charset": "UTF-8",
|
OdataType string `json:"odata.type"`
|
||||||
"Content-Type": "application/json",
|
|
||||||
userAgentHeader: c.client.userAgent,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// QueryTables returns the tables created in the
|
// EntityQueryResult contains the response from
|
||||||
// *TableServiceClient storage account.
|
// ExecuteQuery and ExecuteQueryNextResults functions.
|
||||||
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
|
type EntityQueryResult struct {
|
||||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
OdataMetadata string `json:"odata.metadata"`
|
||||||
|
Entities []*Entity `json:"value"`
|
||||||
|
QueryNextLink
|
||||||
|
table *Table
|
||||||
|
}
|
||||||
|
|
||||||
headers := c.getStandardHeaders()
|
type continuationToken struct {
|
||||||
headers["Content-Length"] = "0"
|
NextPartitionKey string
|
||||||
|
NextRowKey string
|
||||||
|
}
|
||||||
|
|
||||||
resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
|
func (t *Table) buildPath() string {
|
||||||
|
return fmt.Sprintf("/%s", t.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Table) buildSpecificPath() string {
|
||||||
|
return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get gets the referenced table.
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
|
||||||
|
func (t *Table) Get(timeout uint, ml MetadataLevel) error {
|
||||||
|
if ml == EmptyPayload {
|
||||||
|
return errEmptyPayload
|
||||||
|
}
|
||||||
|
|
||||||
|
query := url.Values{
|
||||||
|
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
|
||||||
|
}
|
||||||
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers[headerAccept] = string(ml)
|
||||||
|
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
ioutil.ReadAll(resp.body)
|
return err
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
respBody, err := ioutil.ReadAll(resp.Body)
|
||||||
if _, err := buf.ReadFrom(resp.body); err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
err = json.Unmarshal(respBody, t)
|
||||||
var respArray queryTablesResponse
|
if err != nil {
|
||||||
if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
|
return err
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
s := make([]AzureTable, len(respArray.TableName))
|
|
||||||
for i, elem := range respArray.TableName {
|
|
||||||
s[i] = AzureTable(elem.TableName)
|
|
||||||
}
|
|
||||||
|
|
||||||
return s, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateTable creates the table given the specific
|
// Create creates the referenced table.
|
||||||
// name. This function fails if the name is not compliant
|
// This function fails if the name is not compliant
|
||||||
// with the specification or the table already exists.
|
// with the specification or the table already exists.
|
||||||
func (c *TableServiceClient) CreateTable(table AzureTable) error {
|
// ml determines the level of detail of metadata in the operation response,
|
||||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
// or no data at all.
|
||||||
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table
|
||||||
|
func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error {
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{
|
||||||
|
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
|
||||||
|
})
|
||||||
|
|
||||||
headers := c.getStandardHeaders()
|
type createTableRequest struct {
|
||||||
|
TableName string `json:"TableName"`
|
||||||
req := createTableRequest{TableName: string(table)}
|
}
|
||||||
|
req := createTableRequest{TableName: t.Name}
|
||||||
buf := new(bytes.Buffer)
|
buf := new(bytes.Buffer)
|
||||||
|
|
||||||
if err := json.NewEncoder(buf).Encode(req); err != nil {
|
if err := json.NewEncoder(buf).Encode(req); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers = addReturnContentHeaders(headers, ml)
|
||||||
resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth)
|
headers = addBodyRelatedHeaders(headers, buf.Len())
|
||||||
|
headers = options.addToHeaders(headers)
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer resp.Body.Close()
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
if ml == EmptyPayload {
|
||||||
return err
|
if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ml != EmptyPayload {
|
||||||
|
data, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(data, t)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteTable deletes the table given the specific
|
// Delete deletes the referenced table.
|
||||||
// name. This function fails if the table is not present.
|
// This function fails if the table is not present.
|
||||||
// Be advised: DeleteTable deletes all the entries
|
// Be advised: Delete deletes all the entries that may be present.
|
||||||
// that may be present.
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table
|
||||||
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
|
func (t *Table) Delete(timeout uint, options *TableOptions) error {
|
||||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{
|
||||||
uri += fmt.Sprintf("('%s')", string(table))
|
"timeout": {strconv.Itoa(int(timeout))},
|
||||||
|
})
|
||||||
|
|
||||||
headers := c.getStandardHeaders()
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers = addReturnContentHeaders(headers, EmptyPayload)
|
||||||
headers["Content-Length"] = "0"
|
headers = options.addToHeaders(headers)
|
||||||
|
|
||||||
resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
|
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
return err
|
|
||||||
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL
|
// QueryOptions includes options for a query entities operation.
|
||||||
func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) {
|
// Top, filter and select are OData query options.
|
||||||
params := url.Values{"comp": {"acl"}}
|
type QueryOptions struct {
|
||||||
|
Top uint
|
||||||
|
Filter string
|
||||||
|
Select []string
|
||||||
|
RequestID string
|
||||||
|
}
|
||||||
|
|
||||||
if timeout > 0 {
|
func (options *QueryOptions) getParameters() (url.Values, map[string]string) {
|
||||||
params.Add("timeout", fmt.Sprint(timeout))
|
query := url.Values{}
|
||||||
|
headers := map[string]string{}
|
||||||
|
if options != nil {
|
||||||
|
if options.Top > 0 {
|
||||||
|
query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
|
||||||
|
}
|
||||||
|
if options.Filter != "" {
|
||||||
|
query.Add(OdataFilter, options.Filter)
|
||||||
|
}
|
||||||
|
if len(options.Select) > 0 {
|
||||||
|
query.Add(OdataSelect, strings.Join(options.Select, ","))
|
||||||
|
}
|
||||||
|
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
||||||
|
}
|
||||||
|
return query, headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryEntities returns the entities in the table.
|
||||||
|
// You can use query options defined by the OData Protocol specification.
|
||||||
|
//
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
|
||||||
|
func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) {
|
||||||
|
if ml == EmptyPayload {
|
||||||
|
return nil, errEmptyPayload
|
||||||
|
}
|
||||||
|
query, headers := options.getParameters()
|
||||||
|
query = addTimeout(query, timeout)
|
||||||
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query)
|
||||||
|
return t.queryEntities(uri, headers, ml)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextResults returns the next page of results
|
||||||
|
// from a QueryEntities or NextResults operation.
|
||||||
|
//
|
||||||
|
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
|
||||||
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
|
||||||
|
func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) {
|
||||||
|
if eqr == nil {
|
||||||
|
return nil, errNilPreviousResult
|
||||||
|
}
|
||||||
|
if eqr.NextLink == nil {
|
||||||
|
return nil, errNilNextLink
|
||||||
|
}
|
||||||
|
headers := options.addToHeaders(map[string]string{})
|
||||||
|
return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPermissions sets up table ACL permissions
|
||||||
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL
|
||||||
|
func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error {
|
||||||
|
params := url.Values{"comp": {"acl"},
|
||||||
|
"timeout": {strconv.Itoa(int(timeout))},
|
||||||
}
|
}
|
||||||
|
|
||||||
uri := c.client.getEndpoint(tableServiceName, string(table), params)
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
|
||||||
headers := c.client.getStandardHeaders()
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
|
headers = options.addToHeaders(headers)
|
||||||
|
|
||||||
body, length, err := generateTableACLPayload(policies)
|
body, length, err := generateTableACLPayload(tap)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
headers["Content-Length"] = fmt.Sprintf("%v", length)
|
headers["Content-Length"] = strconv.Itoa(length)
|
||||||
|
|
||||||
resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth)
|
resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer readAndCloseBody(resp.body)
|
defer drainRespBody(resp)
|
||||||
|
|
||||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
return checkRespCode(resp, []int{http.StatusNoContent})
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
|
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
|
||||||
|
@ -182,38 +286,99 @@ func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, erro
|
||||||
return xmlMarshal(sil)
|
return xmlMarshal(sil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl
|
// GetPermissions gets the table ACL permissions
|
||||||
func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) {
|
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl
|
||||||
params := url.Values{"comp": {"acl"}}
|
func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) {
|
||||||
|
params := url.Values{"comp": {"acl"},
|
||||||
if timeout > 0 {
|
"timeout": {strconv.Itoa(int(timeout))},
|
||||||
params.Add("timeout", strconv.Itoa(timeout))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
uri := c.client.getEndpoint(tableServiceName, string(table), params)
|
uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
|
||||||
headers := c.client.getStandardHeaders()
|
headers := t.tsc.client.getStandardHeaders()
|
||||||
resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
|
headers = options.addToHeaders(headers)
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
ioutil.ReadAll(resp.body)
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var ap AccessPolicy
|
var ap AccessPolicy
|
||||||
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
|
err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
out := updateTableAccessPolicy(ap)
|
return updateTableAccessPolicy(ap), nil
|
||||||
return out, nil
|
}
|
||||||
|
|
||||||
|
func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) {
|
||||||
|
headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders())
|
||||||
|
if ml != EmptyPayload {
|
||||||
|
headers[headerAccept] = string(ml)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var entities EntityQueryResult
|
||||||
|
err = json.Unmarshal(data, &entities)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range entities.Entities {
|
||||||
|
entities.Entities[i].Table = t
|
||||||
|
}
|
||||||
|
entities.table = t
|
||||||
|
|
||||||
|
contToken := extractContinuationTokenFromHeaders(resp.Header)
|
||||||
|
if contToken == nil {
|
||||||
|
entities.NextLink = nil
|
||||||
|
} else {
|
||||||
|
originalURI, err := url.Parse(uri)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
v := originalURI.Query()
|
||||||
|
v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey)
|
||||||
|
v.Set(nextRowKeyQueryParameter, contToken.NextRowKey)
|
||||||
|
newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v)
|
||||||
|
entities.NextLink = &newURI
|
||||||
|
entities.ml = ml
|
||||||
|
}
|
||||||
|
|
||||||
|
return &entities, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractContinuationTokenFromHeaders(h http.Header) *continuationToken {
|
||||||
|
ct := continuationToken{
|
||||||
|
NextPartitionKey: h.Get(headerNextPartitionKey),
|
||||||
|
NextRowKey: h.Get(headerNextRowKey),
|
||||||
|
}
|
||||||
|
|
||||||
|
if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
|
||||||
|
return &ct
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
|
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
|
||||||
out := []TableAccessPolicy{}
|
taps := []TableAccessPolicy{}
|
||||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
||||||
tap := TableAccessPolicy{
|
tap := TableAccessPolicy{
|
||||||
ID: policy.ID,
|
ID: policy.ID,
|
||||||
|
@ -225,9 +390,9 @@ func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
|
||||||
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
|
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
|
||||||
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
||||||
|
|
||||||
out = append(out, tap)
|
taps = append(taps, tap)
|
||||||
}
|
}
|
||||||
return out
|
return taps
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
|
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
|
||||||
|
|
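For reference, a sketch of the new Table type that replaces AzureTable; not part of the vendored file. GetTableService, GetTableReference and the MinimalMetadata constant are assumed from the rest of the package, and the table name is a placeholder.

// Sketch only; imports: "fmt", "github.com/Azure/azure-sdk-for-go/storage".
func exampleTable(client storage.Client) error {
	tsc := client.GetTableService()
	table := tsc.GetTableReference("exampletable") // hypothetical name

	// Create with a 30 second timeout and no response payload (expects 204 No Content).
	if err := table.Create(30, storage.EmptyPayload, nil); err != nil {
		return err
	}

	// Fetch the first 5 entities; a continuation token, if any, ends up in result.NextLink.
	result, err := table.QueryEntities(30, storage.MinimalMetadata, &storage.QueryOptions{Top: 5})
	if err != nil {
		return err
	}
	fmt.Println(len(result.Entities), "entities returned")
	return nil
}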
328
vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
generated
vendored
Normal file
|
@ -0,0 +1,328 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/marstr/guid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Operation type. Insert, Delete, Replace etc.
|
||||||
|
type Operation int
|
||||||
|
|
||||||
|
// consts for batch operations.
|
||||||
|
const (
|
||||||
|
InsertOp = Operation(1)
|
||||||
|
DeleteOp = Operation(2)
|
||||||
|
ReplaceOp = Operation(3)
|
||||||
|
MergeOp = Operation(4)
|
||||||
|
InsertOrReplaceOp = Operation(5)
|
||||||
|
InsertOrMergeOp = Operation(6)
|
||||||
|
)
|
||||||
|
|
||||||
|
// BatchEntity used for tracking Entities to operate on and
|
||||||
|
// whether operations (replace/merge etc) should be forced.
|
||||||
|
// Wrapper for regular Entity with additional data specific for the entity.
|
||||||
|
type BatchEntity struct {
|
||||||
|
*Entity
|
||||||
|
Force bool
|
||||||
|
Op Operation
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableBatch stores all the entities that will be operated on during a batch process.
|
||||||
|
// Entities can be inserted, replaced or deleted.
|
||||||
|
type TableBatch struct {
|
||||||
|
BatchEntitySlice []BatchEntity
|
||||||
|
|
||||||
|
// reference to table we're operating on.
|
||||||
|
Table *Table
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultChangesetHeaders for changeSets
|
||||||
|
var defaultChangesetHeaders = map[string]string{
|
||||||
|
"Accept": "application/json;odata=minimalmetadata",
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Prefer": "return-no-content",
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBatch return new TableBatch for populating.
|
||||||
|
func (t *Table) NewBatch() *TableBatch {
|
||||||
|
return &TableBatch{
|
||||||
|
Table: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertEntity adds an entity in preparation for a batch insert.
|
||||||
|
func (t *TableBatch) InsertEntity(entity *Entity) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
|
||||||
|
func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: InsertOrReplaceOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
|
||||||
|
func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
|
||||||
|
t.InsertOrReplaceEntity(entity, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
|
||||||
|
func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: InsertOrMergeOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
|
||||||
|
func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
|
||||||
|
t.InsertOrMergeEntity(entity, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplaceEntity adds an entity in preparation for a batch replace.
|
||||||
|
func (t *TableBatch) ReplaceEntity(entity *Entity) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteEntity adds an entity in preparation for a batch delete
|
||||||
|
func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: DeleteOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
|
||||||
|
func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
|
||||||
|
t.DeleteEntity(entity, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeEntity adds an entity in preparation for a batch merge
|
||||||
|
func (t *TableBatch) MergeEntity(entity *Entity) {
|
||||||
|
be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
|
||||||
|
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecuteBatch executes many table operations in one request to Azure.
|
||||||
|
// The operations can be combinations of Insert, Delete, Replace and Merge
|
||||||
|
// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses
|
||||||
|
// the changesets.
|
||||||
|
// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
|
||||||
|
func (t *TableBatch) ExecuteBatch() error {
|
||||||
|
|
||||||
|
// Using `github.com/marstr/guid` is in response to issue #947 (https://github.com/Azure/azure-sdk-for-go/issues/947).
|
||||||
|
id, err := guid.NewGUIDs(guid.CreationStrategyVersion1)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
|
||||||
|
uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
|
||||||
|
changesetBody, err := t.generateChangesetBody(changesetBoundary)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err = guid.NewGUIDs(guid.CreationStrategyVersion1)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
boundary := fmt.Sprintf("batch_%s", id.String())
|
||||||
|
body, err := generateBody(changesetBody, changesetBoundary, boundary)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
headers := t.Table.tsc.client.getStandardHeaders()
|
||||||
|
headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
|
||||||
|
|
||||||
|
resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer drainRespBody(resp.resp)
|
||||||
|
|
||||||
|
if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {
|
||||||
|
|
||||||
|
// check which batch failed.
|
||||||
|
operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
|
||||||
|
requestID, date, version := getDebugHeaders(resp.resp.Header)
|
||||||
|
return AzureStorageServiceError{
|
||||||
|
StatusCode: resp.resp.StatusCode,
|
||||||
|
Code: resp.odata.Err.Code,
|
||||||
|
RequestID: requestID,
|
||||||
|
Date: date,
|
||||||
|
APIVersion: version,
|
||||||
|
Message: operationFailedMessage,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
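For reference, a sketch of driving ExecuteBatch above; not part of the vendored file. GetEntityReference and the Entity.Properties map are assumed from entity.go, and the partition/row keys are placeholders.

// Sketch only; import "github.com/Azure/azure-sdk-for-go/storage".
func exampleBatch(table *storage.Table) error {
	batch := table.NewBatch()

	entity := table.GetEntityReference("examplePK", "exampleRK") // assumed helper from entity.go
	entity.Properties = map[string]interface{}{"Count": 1}
	batch.InsertOrReplaceEntityByForce(entity)

	// One round trip; a failing element is surfaced as an AzureStorageServiceError.
	return batch.ExecuteBatch()
}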
// getFailedOperation parses the original Azure error string and determines which operation failed
|
||||||
|
// and generates appropriate message.
|
||||||
|
func (t *TableBatch) getFailedOperation(errorMessage string) string {
|
||||||
|
// errorMessage consists of "number:string" we just need the number.
|
||||||
|
sp := strings.Split(errorMessage, ":")
|
||||||
|
if len(sp) > 1 {
|
||||||
|
msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
|
||||||
|
return msg
|
||||||
|
}
|
||||||
|
|
||||||
|
// cant parse the message, just return the original message to client
|
||||||
|
return errorMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateBody generates the complete body for the batch request.
|
||||||
|
func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
|
||||||
|
|
||||||
|
body := new(bytes.Buffer)
|
||||||
|
writer := multipart.NewWriter(body)
|
||||||
|
writer.SetBoundary(boundary)
|
||||||
|
h := make(textproto.MIMEHeader)
|
||||||
|
h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
|
||||||
|
batchWriter, err := writer.CreatePart(h)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
batchWriter.Write(changeSetBody.Bytes())
|
||||||
|
writer.Close()
|
||||||
|
return body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateChangesetBody generates the individual changesets for the various operations within the batch request.
|
||||||
|
// There is a changeset for Insert, Delete, Merge etc.
|
||||||
|
func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
|
||||||
|
|
||||||
|
body := new(bytes.Buffer)
|
||||||
|
writer := multipart.NewWriter(body)
|
||||||
|
writer.SetBoundary(changesetBoundary)
|
||||||
|
|
||||||
|
for _, be := range t.BatchEntitySlice {
|
||||||
|
t.generateEntitySubset(&be, writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
writer.Close()
|
||||||
|
return body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateVerb generates the HTTP request VERB required for each changeset.
|
||||||
|
func generateVerb(op Operation) (string, error) {
|
||||||
|
switch op {
|
||||||
|
case InsertOp:
|
||||||
|
return http.MethodPost, nil
|
||||||
|
case DeleteOp:
|
||||||
|
return http.MethodDelete, nil
|
||||||
|
case ReplaceOp, InsertOrReplaceOp:
|
||||||
|
return http.MethodPut, nil
|
||||||
|
case MergeOp, InsertOrMergeOp:
|
||||||
|
return "MERGE", nil
|
||||||
|
default:
|
||||||
|
return "", errors.New("Unable to detect operation")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateQueryPath generates the query path for within the changesets
|
||||||
|
// For inserts it will just be a table query path (table name)
|
||||||
|
// but for other operations (modifying an existing entity) then
|
||||||
|
// the partition/row keys need to be generated.
|
||||||
|
func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string {
|
||||||
|
if op == InsertOp {
|
||||||
|
return entity.Table.buildPath()
|
||||||
|
}
|
||||||
|
return entity.buildPath()
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateGenericOperationHeaders generates common headers for a given operation.
|
||||||
|
func generateGenericOperationHeaders(be *BatchEntity) map[string]string {
|
||||||
|
retval := map[string]string{}
|
||||||
|
|
||||||
|
for k, v := range defaultChangesetHeaders {
|
||||||
|
retval[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp {
|
||||||
|
if be.Force || be.Entity.OdataEtag == "" {
|
||||||
|
retval["If-Match"] = "*"
|
||||||
|
} else {
|
||||||
|
retval["If-Match"] = be.Entity.OdataEtag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return retval
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateEntitySubset generates body payload for particular batch entity
|
||||||
|
func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {
|
||||||
|
|
||||||
|
h := make(textproto.MIMEHeader)
|
||||||
|
h.Set(headerContentType, "application/http")
|
||||||
|
h.Set(headerContentTransferEncoding, "binary")
|
||||||
|
|
||||||
|
verb, err := generateVerb(batchEntity.Op)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
|
||||||
|
queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
|
||||||
|
uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)
|
||||||
|
|
||||||
|
operationWriter, err := writer.CreatePart(h)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
|
||||||
|
operationWriter.Write([]byte(urlAndVerb))
|
||||||
|
writeHeaders(genericOpHeadersMap, &operationWriter)
|
||||||
|
operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body.
|
||||||
|
|
||||||
|
// delete operation doesn't need a body.
|
||||||
|
if batchEntity.Op != DeleteOp {
|
||||||
|
//var e Entity = batchEntity.Entity
|
||||||
|
body, err := json.Marshal(batchEntity.Entity)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
operationWriter.Write(body)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeHeaders(h map[string]string, writer *io.Writer) {
|
||||||
|
// This way it is guaranteed the headers will be written in a sorted order
|
||||||
|
var keys []string
|
||||||
|
for k := range h {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
for _, k := range keys {
|
||||||
|
(*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k])))
|
||||||
|
}
|
||||||
|
}
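The batch API above is easiest to read with a concrete caller. The following sketch is not part of the diff: the account name, key, table name, and the `GetEntityReference` helper are assumptions about the surrounding `storage` package, while `TableBatch`, `MergeEntity`, and `ExecuteBatch` are the types and methods defined above.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Assumed: a shared-key client for the target account (name and key are placeholders).
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	ts := cli.GetTableService()
	table := ts.GetTableReference("people") // added by this change; see the tableserviceclient.go diff below

	// Assumed helper: an *Entity bound to the table and its partition/row keys.
	entity := table.GetEntityReference("partition1", "row1")
	entity.Properties = map[string]interface{}{"Name": "Gopher"}

	// Queue a MERGE changeset, then send everything as one multipart request to $batch.
	batch := &storage.TableBatch{Table: table}
	batch.MergeEntity(entity)
	if err := batch.ExecuteBatch(); err != nil {
		log.Fatal(err)
	}
}
```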
345 vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go (generated, vendored)
@@ -1,345 +0,0 @@
package storage

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"reflect"
)

// Annotating as secure for gas scanning
/* #nosec */
const (
	partitionKeyNode                    = "PartitionKey"
	rowKeyNode                          = "RowKey"
	tag                                 = "table"
	tagIgnore                           = "-"
	continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
	continuationTokenRowHeader          = "X-Ms-Continuation-Nextrowkey"
	maxTopParameter                     = 1000
)

type queryTablesResponse struct {
	TableName []struct {
		TableName string `json:"TableName"`
	} `json:"value"`
}

const (
	tableOperationTypeInsert          = iota
	tableOperationTypeUpdate          = iota
	tableOperationTypeMerge           = iota
	tableOperationTypeInsertOrReplace = iota
	tableOperationTypeInsertOrMerge   = iota
)

type tableOperation int

// TableEntity interface specifies
// the functions needed to support
// marshaling and unmarshaling into
// Azure Tables. The struct must only contain
// simple types because Azure Tables do not
// support hierarchy.
type TableEntity interface {
	PartitionKey() string
	RowKey() string
	SetPartitionKey(string) error
	SetRowKey(string) error
}

// ContinuationToken is an opaque (ie not useful to inspect)
// struct that Get... methods can return if there are more
// entries to be returned than the ones already
// returned. Just pass it to the same function to continue
// receiving the remaining entries.
type ContinuationToken struct {
	NextPartitionKey string
	NextRowKey       string
}

type getTableEntriesResponse struct {
	Elements []map[string]interface{} `json:"value"`
}

// QueryTableEntities queries the specified table and returns the unmarshaled
// entities of type retType.
// top parameter limits the returned entries up to top. Maximum top
// allowed by Azure API is 1000. In case there are more than top entries to be
// returned the function will return a non nil *ContinuationToken. You can call the
// same function again passing the received ContinuationToken as previousContToken
// parameter in order to get the following entries. The query parameter
// is the odata query. To retrieve all the entries pass the empty string.
// The function returns a pointer to a TableEntity slice, the *ContinuationToken
// if there are more entries to be returned and an error in case something went
// wrong.
//
// Example:
// 		entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "")
func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) {
	if top > maxTopParameter {
		return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top)
	}

	uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{})
	uri += fmt.Sprintf("?$top=%d", top)
	if query != "" {
		uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query))
	}

	if previousContToken != nil {
		uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey)
	}

	headers := c.getStandardHeaders()

	headers["Content-Length"] = "0"

	resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)

	if err != nil {
		return nil, nil, err
	}

	contToken := extractContinuationTokenFromHeaders(resp.headers)

	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, contToken, err
	}

	retEntries, err := deserializeEntity(retType, resp.body)
	if err != nil {
		return nil, contToken, err
	}

	return retEntries, contToken, nil
}

// InsertEntity inserts an entity in the specified table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, false, http.MethodPost); err != nil {
		return checkRespCode(sc, []int{http.StatusCreated})
	}

	return nil
}

func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
	uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
	if specifyKeysInURL {
		uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
	}

	headers := c.getStandardHeaders()

	var buf bytes.Buffer

	if err := injectPartitionAndRowKeys(entity, &buf); err != nil {
		return 0, err
	}

	headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())

	resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth)

	if err != nil {
		return 0, err
	}

	defer resp.body.Close()

	return resp.statusCode, nil
}

// UpdateEntity updates the contents of an entity with the
// one passed as parameter. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil {
		return checkRespCode(sc, []int{http.StatusNoContent})
	}
	return nil
}

// MergeEntity merges the contents of an entity with the
// one passed as parameter.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
		return checkRespCode(sc, []int{http.StatusNoContent})
	}
	return nil
}

// DeleteEntityWithoutCheck deletes the entity matching by
// PartitionKey and RowKey. There is no check on IfMatch
// parameter so the entity is always deleted.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error {
	return c.DeleteEntity(table, entity, "*")
}

// DeleteEntity deletes the entity matching by
// PartitionKey, RowKey and ifMatch field.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or
// the ifMatch is different.
func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error {
	uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
	uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))

	headers := c.getStandardHeaders()

	headers["Content-Length"] = "0"
	headers["If-Match"] = ifMatch

	resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)

	if err != nil {
		return err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
		return err
	}

	return nil
}

// InsertOrReplaceEntity inserts an entity in the specified table
// or replaced the existing one.
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil {
		return checkRespCode(sc, []int{http.StatusNoContent})
	}
	return nil
}

// InsertOrMergeEntity inserts an entity in the specified table
// or merges the existing one.
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
		return checkRespCode(sc, []int{http.StatusNoContent})
	}
	return nil
}

func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
	if err := json.NewEncoder(buf).Encode(entity); err != nil {
		return err
	}

	dec := make(map[string]interface{})
	if err := json.NewDecoder(buf).Decode(&dec); err != nil {
		return err
	}

	// Inject PartitionKey and RowKey
	dec[partitionKeyNode] = entity.PartitionKey()
	dec[rowKeyNode] = entity.RowKey()

	// Remove tagged fields
	// The tag is defined in the const section
	// This is useful to avoid storing the PartitionKey and RowKey twice.
	numFields := reflect.ValueOf(entity).Elem().NumField()
	for i := 0; i < numFields; i++ {
		f := reflect.ValueOf(entity).Elem().Type().Field(i)

		if f.Tag.Get(tag) == tagIgnore {
			// we must look for its JSON name in the dictionary
			// as the user can rename it using a tag
			jsonName := f.Name
			if f.Tag.Get("json") != "" {
				jsonName = f.Tag.Get("json")
			}
			delete(dec, jsonName)
		}
	}

	buf.Reset()

	if err := json.NewEncoder(buf).Encode(&dec); err != nil {
		return err
	}

	return nil
}

func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) {
	buf := new(bytes.Buffer)

	var ret getTableEntriesResponse
	if err := json.NewDecoder(reader).Decode(&ret); err != nil {
		return nil, err
	}

	tEntries := make([]TableEntity, len(ret.Elements))

	for i, entry := range ret.Elements {

		buf.Reset()
		if err := json.NewEncoder(buf).Encode(entry); err != nil {
			return nil, err
		}

		dec := make(map[string]interface{})
		if err := json.NewDecoder(buf).Decode(&dec); err != nil {
			return nil, err
		}

		var pKey, rKey string
		// strip pk and rk
		for key, val := range dec {
			switch key {
			case partitionKeyNode:
				pKey = val.(string)
			case rowKeyNode:
				rKey = val.(string)
			}
		}

		delete(dec, partitionKeyNode)
		delete(dec, rowKeyNode)

		buf.Reset()
		if err := json.NewEncoder(buf).Encode(dec); err != nil {
			return nil, err
		}

		// Create a empty retType instance
		tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity)
		// Popolate it with the values
		if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil {
			return nil, err
		}

		// Reset PartitionKey and RowKey
		if err := tEntries[i].SetPartitionKey(pKey); err != nil {
			return nil, err
		}
		if err := tEntries[i].SetRowKey(rKey); err != nil {
			return nil, err
		}
	}

	return tEntries, nil
}

func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken {
	ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)}

	if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
		return &ct
	}
	return nil
}
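For context on what the deleted file expected from callers: `QueryTableEntities` deserialized rows into any type implementing the `TableEntity` interface above. A minimal, purely illustrative implementation (the struct and its field names are invented for this sketch) would have looked like this:

```go
// Illustrative only: a type satisfying the removed TableEntity interface.
type person struct {
	pk, rk string

	Name string `json:"Name"`
	Age  int    `json:"Age"`
}

func (p *person) PartitionKey() string             { return p.pk }
func (p *person) RowKey() string                   { return p.rk }
func (p *person) SetPartitionKey(key string) error { p.pk = key; return nil }
func (p *person) SetRowKey(key string) error       { p.rk = key; return nil }
```

A caller would then have passed `reflect.TypeOf(&person{})` as `retType`, matching the doc comment's example; the `Table`/`Entity` API added elsewhere in this change replaces that reflection-based contract.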
192 vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go (generated, vendored)
@@ -1,5 +1,35 @@
 package storage
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+const (
+	headerAccept          = "Accept"
+	headerEtag            = "Etag"
+	headerPrefer          = "Prefer"
+	headerXmsContinuation = "x-ms-Continuation-NextTableName"
+)
+
 // TableServiceClient contains operations for Microsoft Azure Table Storage
 // Service.
 type TableServiceClient struct {
@@ -7,14 +37,168 @@ type TableServiceClient struct {
 	auth authentication
 }
 
+// TableOptions includes options for some table operations
+type TableOptions struct {
+	RequestID string
+}
+
+func (options *TableOptions) addToHeaders(h map[string]string) map[string]string {
+	if options != nil {
+		h = addToHeaders(h, "x-ms-client-request-id", options.RequestID)
+	}
+	return h
+}
+
+// QueryNextLink includes information for getting the next page of
+// results in query operations
+type QueryNextLink struct {
+	NextLink *string
+	ml       MetadataLevel
+}
+
 // GetServiceProperties gets the properties of your storage account's table service.
 // See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
-func (c *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
-	return c.client.getServiceProperties(tableServiceName, c.auth)
+func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
+	return t.client.getServiceProperties(tableServiceName, t.auth)
 }
 
 // SetServiceProperties sets the properties of your storage account's table service.
 // See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
-func (c *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
-	return c.client.setServiceProperties(props, tableServiceName, c.auth)
+func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
+	return t.client.setServiceProperties(props, tableServiceName, t.auth)
+}
+
+// GetTableReference returns a Table object for the specified table name.
+func (t *TableServiceClient) GetTableReference(name string) *Table {
+	return &Table{
+		tsc:  t,
+		Name: name,
+	}
+}
+
+// QueryTablesOptions includes options for some table operations
+type QueryTablesOptions struct {
+	Top       uint
+	Filter    string
+	RequestID string
+}
+
+func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) {
+	query := url.Values{}
+	headers := map[string]string{}
+	if options != nil {
+		if options.Top > 0 {
+			query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
+		}
+		if options.Filter != "" {
+			query.Add(OdataFilter, options.Filter)
+		}
+		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
+	}
+	return query, headers
+}
+
+// QueryTables returns the tables in the storage account.
+// You can use query options defined by the OData Protocol specification.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables
+func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) {
+	query, headers := options.getParameters()
+	uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query)
+	return t.queryTables(uri, headers, ml)
+}
+
+// NextResults returns the next page of results
+// from a QueryTables or a NextResults operation.
+//
+// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables
+// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
+func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) {
+	if tqr == nil {
+		return nil, errNilPreviousResult
+	}
+	if tqr.NextLink == nil {
+		return nil, errNilNextLink
+	}
+	headers := options.addToHeaders(map[string]string{})
+
+	return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml)
+}
+
+// TableQueryResult contains the response from
+// QueryTables and QueryTablesNextResults functions.
+type TableQueryResult struct {
+	OdataMetadata string  `json:"odata.metadata"`
+	Tables        []Table `json:"value"`
+	QueryNextLink
+	tsc *TableServiceClient
+}
+
+func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) {
+	if ml == EmptyPayload {
+		return nil, errEmptyPayload
+	}
+	headers = mergeHeaders(headers, t.client.getStandardHeaders())
+	headers[headerAccept] = string(ml)
+
+	resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
+		return nil, err
+	}
+
+	respBody, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	var out TableQueryResult
+	err = json.Unmarshal(respBody, &out)
+	if err != nil {
+		return nil, err
+	}
+
+	for i := range out.Tables {
+		out.Tables[i].tsc = t
+	}
+	out.tsc = t
+
+	nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation))
+	if nextLink == "" {
+		out.NextLink = nil
+	} else {
+		originalURI, err := url.Parse(uri)
+		if err != nil {
+			return nil, err
+		}
+		v := originalURI.Query()
+		v.Set(nextTableQueryParameter, nextLink)
+		newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v)
+		out.NextLink = &newURI
+		out.ml = ml
+	}
+
+	return &out, nil
+}
+
+func addBodyRelatedHeaders(h map[string]string, length int) map[string]string {
+	h[headerContentType] = "application/json"
+	h[headerContentLength] = fmt.Sprintf("%v", length)
+	h[headerAcceptCharset] = "UTF-8"
+	return h
+}
+
+func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string {
+	if ml != EmptyPayload {
+		h[headerPrefer] = "return-content"
+		h[headerAccept] = string(ml)
+	} else {
+		h[headerPrefer] = "return-no-content"
+		// From API version 2015-12-11 onwards, Accept header is required
+		h[headerAccept] = string(NoMetadata)
+	}
+	return h
 }

165 vendor/github.com/Azure/azure-sdk-for-go/storage/util.go (generated, vendored)
@@ -1,5 +1,19 @@
 package storage
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 	"bytes"
 	"crypto/hmac"
@@ -12,9 +26,37 @@ import (
 	"net/http"
 	"net/url"
 	"reflect"
+	"strconv"
+	"strings"
 	"time"
 )
 
+var (
+	fixedTime         = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
+	accountSASOptions = AccountSASTokenOptions{
+		Services: Services{
+			Blob: true,
+		},
+		ResourceTypes: ResourceTypes{
+			Service:   true,
+			Container: true,
+			Object:    true,
+		},
+		Permissions: Permissions{
+			Read:    true,
+			Write:   true,
+			Delete:  true,
+			List:    true,
+			Add:     true,
+			Create:  true,
+			Update:  true,
+			Process: true,
+		},
+		Expiry:   fixedTime,
+		UseHTTPS: true,
+	}
+)
+
 func (c Client) computeHmac256(message string) string {
 	h := hmac.New(sha256.New, c.accountKey)
 	h.Write([]byte(message))
@@ -29,6 +71,10 @@ func timeRfc1123Formatted(t time.Time) string {
 	return t.Format(http.TimeFormat)
 }
 
+func timeRFC3339Formatted(t time.Time) string {
+	return t.Format("2006-01-02T15:04:05.0000000Z")
+}
+
 func mergeParams(v1, v2 url.Values) url.Values {
 	out := url.Values{}
 	for k, v := range v1 {
@@ -76,10 +122,123 @@ func headersFromStruct(v interface{}) map[string]string {
 	value := reflect.ValueOf(v)
 	for i := 0; i < value.NumField(); i++ {
 		key := value.Type().Field(i).Tag.Get("header")
-		val := value.Field(i).String()
-		if key != "" && val != "" {
-			headers[key] = val
+		if key != "" {
+			reflectedValue := reflect.Indirect(value.Field(i))
+			var val string
+			if reflectedValue.IsValid() {
+				switch reflectedValue.Type() {
+				case reflect.TypeOf(fixedTime):
+					val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
+				case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
+					val = strconv.FormatUint(reflectedValue.Uint(), 10)
+				case reflect.TypeOf(int(0)):
+					val = strconv.FormatInt(reflectedValue.Int(), 10)
+				default:
+					val = reflectedValue.String()
+				}
+			}
+			if val != "" {
+				headers[key] = val
+			}
 		}
 	}
 	return headers
 }
+
+// merges extraHeaders into headers and returns headers
+func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
+	for k, v := range extraHeaders {
+		headers[k] = v
+	}
+	return headers
+}
+
+func addToHeaders(h map[string]string, key, value string) map[string]string {
+	if value != "" {
+		h[key] = value
+	}
+	return h
+}
+
+func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
+	if value != nil {
+		h = addToHeaders(h, key, timeRfc1123Formatted(*value))
+	}
+	return h
+}
+
+func addTimeout(params url.Values, timeout uint) url.Values {
+	if timeout > 0 {
+		params.Add("timeout", fmt.Sprintf("%v", timeout))
+	}
+	return params
+}
+
+func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
+	if snapshot != nil {
+		params.Add("snapshot", timeRFC3339Formatted(*snapshot))
+	}
+	return params
+}
+
+func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) {
+	var out time.Time
+	var err error
+	outStr := h.Get(key)
+	if outStr != "" {
+		out, err = time.Parse(time.RFC1123, outStr)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &out, nil
+}
+
+// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling
+type TimeRFC1123 time.Time
+
+// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout.
+func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	var value string
+	d.DecodeElement(&value, &start)
+	parse, err := time.Parse(time.RFC1123, value)
+	if err != nil {
+		return err
+	}
+	*t = TimeRFC1123(parse)
+	return nil
+}
+
+// MarshalXML marshals using time.RFC1123.
+func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start)
+}
+
+// returns a map of custom metadata values from the specified HTTP header
+func getMetadataFromHeaders(header http.Header) map[string]string {
+	metadata := make(map[string]string)
+	for k, v := range header {
+		// Can't trust CanonicalHeaderKey() to munge case
+		// reliably. "_" is allowed in identifiers:
+		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
+		// http://tools.ietf.org/html/rfc7230#section-3.2
+		// ...but "_" is considered invalid by
+		// CanonicalMIMEHeaderKey in
+		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
+		// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
+		k = strings.ToLower(k)
+		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
+			continue
+		}
+		// metadata["lol"] = content of the last X-Ms-Meta-Lol header
+		k = k[len(userDefinedMetadataHeaderPrefix):]
+		metadata[k] = v[len(v)-1]
+	}
+
+	if len(metadata) == 0 {
+		return nil
+	}
+
+	return metadata
+}

5 vendor/github.com/Azure/azure-sdk-for-go/storage/version.go (generated, vendored)
@@ -1,5 +0,0 @@
package storage

var (
	sdkVersion = "0.1.0"
)

21 vendor/github.com/Azure/azure-sdk-for-go/version/version.go (generated, vendored, new file)
@@ -0,0 +1,21 @@
package version

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

// Number contains the semantic version of this SDK.
const Number = "v16.2.1"
29 vendor/github.com/Azure/go-autorest/README.md (generated, vendored)
@@ -1,11 +1,22 @@
 # go-autorest
 
-[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) [![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) [![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest)
+[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest)
+[![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest)
+[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest)
 
-## Usage
-Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
-and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
-generated Go code.
+Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.
+
+An authentication client tested with Azure Active Directory (AAD) is also
+provided in this repo in the package
+`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package
+is maintained only as part of the Azure Go SDK and is not related to other
+"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).
+
+## Overview
+
+Package go-autorest implements an HTTP request pipeline suitable for use across
+multiple goroutines and provides the shared routines used by packages generated
+by [Autorest](https://github.com/Azure/autorest.go).
 
 The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
 and Responding. A typical pattern is:
@@ -129,4 +140,10 @@ go get github.com/Azure/go-autorest/autorest/to
 See LICENSE file.
 
 -----
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
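The three phases named in the README map onto `autorest.Prepare`, `autorest.Send`, and `autorest.Respond`. A rough sketch of the pattern follows; the URL is a placeholder, and while the decorators used here appear elsewhere in this vendored code or in the package's documentation, treat the exact combination as an assumption rather than the library's canonical example.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

// fetchItems walks the Prepare / Send / Respond phases end to end.
func fetchItems() error {
	// Preparing: decorate a bare request with method and URL.
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://example.invalid/api/items")) // placeholder URL
	if err != nil {
		return err
	}

	// Sending: send decorators (here, simple retries) wrap the HTTP round trip.
	resp, err := autorest.Send(req,
		autorest.DoRetryForAttempts(3, time.Second))
	if err != nil {
		return err
	}

	// Responding: validate the status code and make sure the body gets closed.
	return autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByClosing())
}

func main() {
	if err := fetchItems(); err != nil {
		log.Fatal(err)
	}
}
```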
292
vendor/github.com/Azure/go-autorest/autorest/adal/README.md
generated
vendored
Normal file
292
vendor/github.com/Azure/go-autorest/autorest/adal/README.md
generated
vendored
Normal file
|
@ -0,0 +1,292 @@
|
||||||
|
# Azure Active Directory authentication for Go
|
||||||
|
|
||||||
|
This is a standalone package for authenticating with Azure Active
|
||||||
|
Directory from other Go libraries and applications, in particular the [Azure SDK
|
||||||
|
for Go](https://github.com/Azure/azure-sdk-for-go).
|
||||||
|
|
||||||
|
Note: Despite the package's name it is not related to other "ADAL" libraries
|
||||||
|
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
|
||||||
|
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
|
||||||
|
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
|
||||||
|
trackers.
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go get -u github.com/Azure/go-autorest/autorest/adal
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
|
||||||
|
|
||||||
|
### Register an Azure AD Application with secret
|
||||||
|
|
||||||
|
|
||||||
|
1. Register a new application with a `secret` credential
|
||||||
|
|
||||||
|
```
|
||||||
|
az ad app create \
|
||||||
|
--display-name example-app \
|
||||||
|
--homepage https://example-app/home \
|
||||||
|
--identifier-uris https://example-app/app \
|
||||||
|
--password secret
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create a service principal using the `Application ID` from previous step
|
||||||
|
|
||||||
|
```
|
||||||
|
az ad sp create --id "Application ID"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace `Application ID` with `appId` from step 1.
|
||||||
|
|
||||||
|
### Register an Azure AD Application with certificate
|
||||||
|
|
||||||
|
1. Create a private key
|
||||||
|
|
||||||
|
```
|
||||||
|
openssl genrsa -out "example-app.key" 2048
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create the certificate
|
||||||
|
|
||||||
|
```
|
||||||
|
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
|
||||||
|
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Create the PKCS12 version of the certificate containing also the private key
|
||||||
|
|
||||||
|
```
|
||||||
|
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Register a new application with the certificate content form `example-app.crt`
|
||||||
|
|
||||||
|
```
|
||||||
|
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
|
||||||
|
|
||||||
|
az ad app create \
|
||||||
|
--display-name example-app \
|
||||||
|
--homepage https://example-app/home \
|
||||||
|
--identifier-uris https://example-app/app \
|
||||||
|
--key-usage Verify --end-date 2018-01-01 \
|
||||||
|
--key-value "${certificateContents}"
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Create a service principal using the `Application ID` from previous step
|
||||||
|
|
||||||
|
```
|
||||||
|
az ad sp create --id "APPLICATION_ID"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace `APPLICATION_ID` with `appId` from step 4.
|
||||||
|
|
||||||
|
|
||||||
|
### Grant the necessary permissions
|
||||||
|
|
||||||
|
Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
|
||||||
|
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
|
||||||
|
which can be assigned to a service principal of an Azure AD application depending of your needs.
|
||||||
|
|
||||||
|
```
|
||||||
|
az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
|
||||||
|
* Replace the `ROLE_NAME` with a role name of your choice.
|
||||||
|
|
||||||
|
It is also possible to define custom role definitions.
|
||||||
|
|
||||||
|
```
|
||||||
|
az role definition create --role-definition role-definition.json
|
||||||
|
```
|
||||||
|
|
||||||
|
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
|
||||||
|
|
||||||
|
|
||||||
|
### Acquire Access Token
|
||||||
|
|
||||||
|
The common configuration used by all flows:
|
||||||
|
|
||||||
|
```Go
|
||||||
|
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
|
||||||
|
tenantID := "TENANT_ID"
|
||||||
|
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
|
||||||
|
|
||||||
|
applicationID := "APPLICATION_ID"
|
||||||
|
|
||||||
|
callback := func(token adal.Token) error {
|
||||||
|
// This is called after the token is acquired
|
||||||
|
}
|
||||||
|
|
||||||
|
// The resource for which the token is acquired
|
||||||
|
resource := "https://management.core.windows.net/"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace the `TENANT_ID` with your tenant ID.
|
||||||
|
* Replace the `APPLICATION_ID` with the value from previous section.
|
||||||
|
|
||||||
|
#### Client Credentials
|
||||||
|
|
||||||
|
```Go
|
||||||
|
applicationSecret := "APPLICATION_SECRET"
|
||||||
|
|
||||||
|
spt, err := adal.NewServicePrincipalToken(
|
||||||
|
oauthConfig,
|
||||||
|
appliationID,
|
||||||
|
applicationSecret,
|
||||||
|
resource,
|
||||||
|
callbacks...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Acquire a new access token
|
||||||
|
err = spt.Refresh()
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
* Replace the `APPLICATION_SECRET` with the `password` value from previous section.
|
||||||
|
|
||||||
|
#### Client Certificate
|
||||||
|
|
||||||
|
```Go
|
||||||
|
certificatePath := "./example-app.pfx"
|
||||||
|
|
||||||
|
certData, err := ioutil.ReadFile(certificatePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the certificate and private key from pfx file
|
||||||
|
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
spt, err := adal.NewServicePrincipalTokenFromCertificate(
|
||||||
|
oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
certificate,
|
||||||
|
rsaPrivateKey,
|
||||||
|
resource,
|
||||||
|
callbacks...)
|
||||||
|
|
||||||
|
// Acquire a new access token
|
||||||
|
err = spt.Refresh()
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
* Update the certificate path to point to the example-app.pfx file which was created in previous section.
|
||||||
|
|
||||||
|
|
||||||
|
#### Device Code
|
||||||
|
|
||||||
|
```Go
|
||||||
|
oauthClient := &http.Client{}
|
||||||
|
|
||||||
|
// Acquire the device code
|
||||||
|
deviceCode, err := adal.InitiateDeviceAuth(
|
||||||
|
oauthClient,
|
||||||
|
oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
resource)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display the authentication message
|
||||||
|
fmt.Println(*deviceCode.Message)
|
||||||
|
|
||||||
|
// Wait here until the user is authenticated
|
||||||
|
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
spt, err := adal.NewServicePrincipalTokenFromManualToken(
|
||||||
|
oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
resource,
|
||||||
|
*token,
|
||||||
|
callbacks...)
|
||||||
|
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Username password authenticate
|
||||||
|
|
||||||
|
```Go
|
||||||
|
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
|
||||||
|
oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
resource,
|
||||||
|
callbacks...)
|
||||||
|
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Authorization code authenticate
|
||||||
|
|
||||||
|
``` Go
|
||||||
|
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
|
||||||
|
oauthConfig,
|
||||||
|
applicationID,
|
||||||
|
clientSecret,
|
||||||
|
authorizationCode,
|
||||||
|
redirectURI,
|
||||||
|
resource,
|
||||||
|
callbacks...)
|
||||||
|
|
||||||
|
err = spt.Refresh()
|
||||||
|
if (err == nil) {
|
||||||
|
token := spt.Token
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Command Line Tool
|
||||||
|
|
||||||
|
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
|
||||||
|
|
||||||
|
```
|
||||||
|
adal -h
|
||||||
|
|
||||||
|
Usage of ./adal:
|
||||||
|
-applicationId string
|
||||||
|
application id
|
||||||
|
-certificatePath string
|
||||||
|
path to pk12/PFC application certificate
|
||||||
|
-mode string
|
||||||
|
authentication mode (device, secret, cert, refresh) (default "device")
|
||||||
|
-resource string
|
||||||
|
resource for which the token is requested
|
||||||
|
-secret string
|
||||||
|
application secret
|
||||||
|
-tenantId string
|
||||||
|
tenant id
|
||||||
|
-tokenCachePath string
|
||||||
|
location of oath token cache (default "/home/cgc/.adal/accessToken.json")
|
||||||
|
```
|
||||||
|
|
||||||
|
Example acquire a token for `https://management.core.windows.net/` using device code flow:
|
||||||
|
|
||||||
|
```
|
||||||
|
adal -mode device \
|
||||||
|
-applicationId "APPLICATION_ID" \
|
||||||
|
-tenantId "TENANT_ID" \
|
||||||
|
-resource https://management.core.windows.net/
|
||||||
|
|
||||||
|
```
|
81 vendor/github.com/Azure/go-autorest/autorest/adal/config.go (generated, vendored, new file)
@@ -0,0 +1,81 @@
package adal

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"fmt"
	"net/url"
)

const (
	activeDirectoryAPIVersion = "1.0"
)

// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
	AuthorityEndpoint  url.URL
	AuthorizeEndpoint  url.URL
	TokenEndpoint      url.URL
	DeviceCodeEndpoint url.URL
}

// IsZero returns true if the OAuthConfig object is zero-initialized.
func (oac OAuthConfig) IsZero() bool {
	return oac == OAuthConfig{}
}

func validateStringParam(param, name string) error {
	if len(param) == 0 {
		return fmt.Errorf("parameter '" + name + "' cannot be empty")
	}
	return nil
}

// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
		return nil, err
	}
	// it's legal for tenantID to be empty so don't validate it
	const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
	u, err := url.Parse(activeDirectoryEndpoint)
	if err != nil {
		return nil, err
	}
	authorityURL, err := u.Parse(tenantID)
	if err != nil {
		return nil, err
	}
	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion))
	if err != nil {
		return nil, err
	}
	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion))
	if err != nil {
		return nil, err
	}
	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion))
	if err != nil {
		return nil, err
	}

	return &OAuthConfig{
		AuthorityEndpoint:  *authorityURL,
		AuthorizeEndpoint:  *authorizeURL,
		TokenEndpoint:      *tokenURL,
		DeviceCodeEndpoint: *deviceCodeURL,
	}, nil
}
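A short sketch of how `NewOAuthConfig` is typically combined with the service-principal token type from the same package; the constructor call and the `Refresh`/`Token` usage mirror the adal README above, and the tenant, application ID, and secret are placeholders.

```go
package main

import (
	"github.com/Azure/go-autorest/autorest/adal"
)

// newToken acquires a client-credential token for Azure Resource Manager.
func newToken() (*adal.ServicePrincipalToken, error) {
	// Build the tenant-specific authorize/token/devicecode endpoints.
	oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
	if err != nil {
		return nil, err
	}

	spt, err := adal.NewServicePrincipalToken(
		*oauthConfig,
		"APPLICATION_ID",
		"APPLICATION_SECRET",
		"https://management.core.windows.net/")
	if err != nil {
		return nil, err
	}

	// Refresh acquires the initial access token; spt.Token then carries it,
	// as shown in the README earlier in this vendored tree.
	if err := spt.Refresh(); err != nil {
		return nil, err
	}
	return spt, nil
}

func main() {
	_, _ = newToken()
}
```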
@@ -1,4 +1,18 @@
package azure
package adal

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
This file is largely based on rjw57/oauth2device's code, with the follow differences:
@@ -10,16 +24,17 @@ package azure
*/

import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"

"github.com/Azure/go-autorest/autorest"
)

const (
logPrefix = "autorest/azure/devicetoken:"
logPrefix = "autorest/adal/devicetoken:"
)

var (
@@ -38,10 +53,17 @@ var (
// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)

// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)

// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)

errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
errTokenSendingFails = "Error occurred while sending request with device code for a token"
errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
errStatusNotOK = "Error HTTP status != 200"
)

// DeviceCode is the object returned by the device auth endpoint
@@ -79,31 +101,45 @@ type deviceToken struct {

// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
req, _ := autorest.Prepare(
v := url.Values{
&http.Request{},
"client_id": []string{clientID},
autorest.AsPost(),
"resource": []string{resource},
autorest.AsFormURLEncoded(),
}
autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()),
autorest.WithFormData(url.Values{
"client_id": []string{clientID},
"resource": []string{resource},
}),
)

resp, err := autorest.SendWithSender(client, req)
s := v.Encode()
body := ioutil.NopCloser(strings.NewReader(s))

req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err)
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
}

req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
resp, err := sender.Do(req)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
}
defer resp.Body.Close()

rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
}

if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
}

if len(strings.Trim(string(rb), " ")) == 0 {
return nil, ErrDeviceCodeEmpty
}

var code DeviceCode
err = autorest.Respond(
err = json.Unmarshal(rb, &code)
resp,
autorest.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&code),
autorest.ByClosing())
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err)
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
}

code.ClientID = clientID
@@ -115,33 +151,46 @@ func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, client

// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has: been completed, timed out, or otherwise failed
func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) {
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
req, _ := autorest.Prepare(
v := url.Values{
&http.Request{},
"client_id": []string{code.ClientID},
autorest.AsPost(),
"code": []string{*code.DeviceCode},
autorest.AsFormURLEncoded(),
"grant_type": []string{OAuthGrantTypeDeviceCode},
autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()),
"resource": []string{code.Resource},
autorest.WithFormData(url.Values{
}
"client_id": []string{code.ClientID},
"code": []string{*code.DeviceCode},
"grant_type": []string{OAuthGrantTypeDeviceCode},
"resource": []string{code.Resource},
}),
)

resp, err := autorest.SendWithSender(client, req)
s := v.Encode()
body := ioutil.NopCloser(strings.NewReader(s))

req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err)
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
}

req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
resp, err := sender.Do(req)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
}
defer resp.Body.Close()

rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
}

if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
}
if len(strings.Trim(string(rb), " ")) == 0 {
return nil, ErrOAuthTokenEmpty
}

var token deviceToken
err = autorest.Respond(
err = json.Unmarshal(rb, &token)
resp,
autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest),
autorest.ByUnmarshallingJSON(&token),
autorest.ByClosing())
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err)
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
}

if token.Error == nil {
@@ -164,12 +213,12 @@ func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token,

// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) {
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
intervalDuration := time.Duration(*code.Interval) * time.Second
waitDuration := intervalDuration

for {
token, err := CheckForUserCompletion(client, code)
token, err := CheckForUserCompletion(sender, code)

if err == nil {
return token, nil
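The rewritten device flow above now takes any Sender rather than an *autorest.Client. A hedged usage sketch follows; the tenant, client ID and resource values are placeholders.

package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Placeholder tenant, client ID and resource values.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "my-tenant-id")
	if err != nil {
		log.Fatal(err)
	}

	sender := &http.Client{}
	code, err := adal.InitiateDeviceAuth(sender, *cfg, "my-client-id", "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	// Show the sign-in instructions carried by the returned DeviceCode to the
	// user, then block until the flow completes or times out.
	token, err := adal.WaitForUserCompletion(sender, code)
	if err != nil {
		log.Fatal(err)
	}
	_ = token
}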
@@ -1,4 +1,18 @@
package azure
package adal

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
"encoding/json"
60 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go generated vendored Normal file
@@ -0,0 +1,60 @@
package adal

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"net/http"
)

const (
	contentType      = "Content-Type"
	mimeTypeFormPost = "application/x-www-form-urlencoded"
)

// Sender is the interface that wraps the Do method to send HTTP requests.
//
// The standard http.Client conforms to this interface.
type Sender interface {
	Do(*http.Request) (*http.Response, error)
}

// SenderFunc is a method that implements the Sender interface.
type SenderFunc func(*http.Request) (*http.Response, error)

// Do implements the Sender interface on SenderFunc.
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
	return sf(r)
}

// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender

// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
func CreateSender(decorators ...SendDecorator) Sender {
	return DecorateSender(&http.Client{}, decorators...)
}

// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
// the Sender. Decorators are applied in the order received, but their affect upon the request
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
// post-decorator (pass the http.Request along and react to the results in http.Response).
func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
	for _, decorate := range decorators {
		s = decorate(s)
	}
	return s
}
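The Sender and SendDecorator types above are the adal package's HTTP transport extension point. A small illustrative sketch of a logging decorator, assuming nothing beyond the definitions in sender.go:

package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// A SendDecorator that logs each request before delegating to the wrapped Sender.
	logging := func(s adal.Sender) adal.Sender {
		return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
			log.Printf("adal request: %s %s", r.Method, r.URL)
			return s.Do(r)
		})
	}
	sender := adal.DecorateSender(&http.Client{}, logging)
	_ = sender // e.g. hand this to a ServicePrincipalToken via SetSender
}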
795 vendor/github.com/Azure/go-autorest/autorest/adal/token.go generated vendored Normal file
@@ -0,0 +1,795 @@
package adal

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha1"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
	"github.com/dgrijalva/jwt-go"
)

const (
	defaultRefresh = 5 * time.Minute

	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
	OAuthGrantTypeDeviceCode = "device_code"

	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
	OAuthGrantTypeClientCredentials = "client_credentials"

	// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
	OAuthGrantTypeUserPass = "password"

	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
	OAuthGrantTypeRefreshToken = "refresh_token"

	// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
	OAuthGrantTypeAuthorizationCode = "authorization_code"

	// metadataHeader is the header required by MSI extension
	metadataHeader = "Metadata"

	// msiEndpoint is the well known endpoint for getting MSI authentications tokens
	msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
)

// OAuthTokenProvider is an interface which should be implemented by an access token retriever
type OAuthTokenProvider interface {
	OAuthToken() string
}

// TokenRefreshError is an interface used by errors returned during token refresh.
type TokenRefreshError interface {
	error
	Response() *http.Response
}

// Refresher is an interface for token refresh functionality
type Refresher interface {
	Refresh() error
	RefreshExchange(resource string) error
	EnsureFresh() error
}

// RefresherWithContext is an interface for token refresh functionality
type RefresherWithContext interface {
	RefreshWithContext(ctx context.Context) error
	RefreshExchangeWithContext(ctx context.Context, resource string) error
	EnsureFreshWithContext(ctx context.Context) error
}

// TokenRefreshCallback is the type representing callbacks that will be called after
// a successful token refresh
type TokenRefreshCallback func(Token) error

// Token encapsulates the access token used to authorize Azure requests.
type Token struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`

	ExpiresIn string `json:"expires_in"`
	ExpiresOn string `json:"expires_on"`
	NotBefore string `json:"not_before"`

	Resource string `json:"resource"`
	Type     string `json:"token_type"`
}

// IsZero returns true if the token object is zero-initialized.
func (t Token) IsZero() bool {
	return t == Token{}
}

// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
	s, err := strconv.Atoi(t.ExpiresOn)
	if err != nil {
		s = -3600
	}

	expiration := date.NewUnixTimeFromSeconds(float64(s))

	return time.Time(expiration).UTC()
}

// IsExpired returns true if the Token is expired, false otherwise.
func (t Token) IsExpired() bool {
	return t.WillExpireIn(0)
}

// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
// from now, false otherwise.
func (t Token) WillExpireIn(d time.Duration) bool {
	return !t.Expires().After(time.Now().Add(d))
}

//OAuthToken return the current access token
func (t *Token) OAuthToken() string {
	return t.AccessToken
}

// ServicePrincipalNoSecret represents a secret type that contains no secret
// meaning it is not valid for fetching a fresh token. This is used by Manual
type ServicePrincipalNoSecret struct {
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret
// It only returns an error for the ServicePrincipalNoSecret type
func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
}

// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form
// that is submitted when acquiring an oAuth token.
type ServicePrincipalSecret interface {
	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
}

// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
type ServicePrincipalTokenSecret struct {
	ClientSecret string
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	v.Set("client_secret", tokenSecret.ClientSecret)
	return nil
}

// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
type ServicePrincipalCertificateSecret struct {
	Certificate *x509.Certificate
	PrivateKey  *rsa.PrivateKey
}

// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
type ServicePrincipalMSISecret struct {
}

// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
type ServicePrincipalUsernamePasswordSecret struct {
	Username string
	Password string
}

// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
type ServicePrincipalAuthorizationCodeSecret struct {
	ClientSecret      string
	AuthorizationCode string
	RedirectURI       string
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	v.Set("code", secret.AuthorizationCode)
	v.Set("client_secret", secret.ClientSecret)
	v.Set("redirect_uri", secret.RedirectURI)
	return nil
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	v.Set("username", secret.Username)
	v.Set("password", secret.Password)
	return nil
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	return nil
}

// SignJwt returns the JWT signed with the certificate's private key.
func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
	hasher := sha1.New()
	_, err := hasher.Write(secret.Certificate.Raw)
	if err != nil {
		return "", err
	}

	thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))

	// The jti (JWT ID) claim provides a unique identifier for the JWT.
	jti := make([]byte, 20)
	_, err = rand.Read(jti)
	if err != nil {
		return "", err
	}

	token := jwt.New(jwt.SigningMethodRS256)
	token.Header["x5t"] = thumbprint
	token.Claims = jwt.MapClaims{
		"aud": spt.oauthConfig.TokenEndpoint.String(),
		"iss": spt.clientID,
		"sub": spt.clientID,
		"jti": base64.URLEncoding.EncodeToString(jti),
		"nbf": time.Now().Unix(),
		"exp": time.Now().Add(time.Hour * 24).Unix(),
	}

	signedString, err := token.SignedString(secret.PrivateKey)
	return signedString, err
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	jwt, err := secret.SignJwt(spt)
	if err != nil {
		return err
	}

	v.Set("client_assertion", jwt)
	v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
	return nil
}

// ServicePrincipalToken encapsulates a Token created for a Service Principal.
type ServicePrincipalToken struct {
	token         Token
	secret        ServicePrincipalSecret
	oauthConfig   OAuthConfig
	clientID      string
	resource      string
	autoRefresh   bool
	refreshLock   *sync.RWMutex
	refreshWithin time.Duration
	sender        Sender

	refreshCallbacks []TokenRefreshCallback
}

func validateOAuthConfig(oac OAuthConfig) error {
	if oac.IsZero() {
		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
	}
	return nil
}

// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	if err := validateOAuthConfig(oauthConfig); err != nil {
		return nil, err
	}
	if err := validateStringParam(id, "id"); err != nil {
		return nil, err
	}
	if err := validateStringParam(resource, "resource"); err != nil {
		return nil, err
	}
	if secret == nil {
		return nil, fmt.Errorf("parameter 'secret' cannot be nil")
	}
	spt := &ServicePrincipalToken{
		oauthConfig:      oauthConfig,
		secret:           secret,
		clientID:         id,
		resource:         resource,
		autoRefresh:      true,
		refreshLock:      &sync.RWMutex{},
		refreshWithin:    defaultRefresh,
		sender:           &http.Client{},
		refreshCallbacks: callbacks,
	}
	return spt, nil
}

// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	if err := validateOAuthConfig(oauthConfig); err != nil {
		return nil, err
	}
	if err := validateStringParam(clientID, "clientID"); err != nil {
		return nil, err
	}
	if err := validateStringParam(resource, "resource"); err != nil {
		return nil, err
	}
	if token.IsZero() {
		return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized")
	}
	spt, err := NewServicePrincipalTokenWithSecret(
		oauthConfig,
		clientID,
		resource,
		&ServicePrincipalNoSecret{},
		callbacks...)
	if err != nil {
		return nil, err
	}

	spt.token = token

	return spt, nil
}

// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
// credentials scoped to the named resource.
func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	if err := validateOAuthConfig(oauthConfig); err != nil {
		return nil, err
	}
	if err := validateStringParam(clientID, "clientID"); err != nil {
		return nil, err
	}
	if err := validateStringParam(secret, "secret"); err != nil {
		return nil, err
	}
	if err := validateStringParam(resource, "resource"); err != nil {
		return nil, err
	}
	return NewServicePrincipalTokenWithSecret(
		oauthConfig,
		clientID,
		resource,
		&ServicePrincipalTokenSecret{
			ClientSecret: secret,
		},
		callbacks...,
	)
}

// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes.
func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	if err := validateOAuthConfig(oauthConfig); err != nil {
		return nil, err
	}
	if err := validateStringParam(clientID, "clientID"); err != nil {
		return nil, err
	}
	if err := validateStringParam(resource, "resource"); err != nil {
		return nil, err
	}
	if certificate == nil {
		return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
	}
	if privateKey == nil {
		return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
	}
	return NewServicePrincipalTokenWithSecret(
		oauthConfig,
		clientID,
		resource,
		&ServicePrincipalCertificateSecret{
			PrivateKey:  privateKey,
			Certificate: certificate,
		},
		callbacks...,
	)
}

// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	if err := validateOAuthConfig(oauthConfig); err != nil {
		return nil, err
	}
	if err := validateStringParam(clientID, "clientID"); err != nil {
		return nil, err
	}
	if err := validateStringParam(username, "username"); err != nil {
		return nil, err
	}
	if err := validateStringParam(password, "password"); err != nil {
		return nil, err
	}
	if err := validateStringParam(resource, "resource"); err != nil {
		return nil, err
	}
	return NewServicePrincipalTokenWithSecret(
		oauthConfig,
		clientID,
		resource,
		&ServicePrincipalUsernamePasswordSecret{
			Username: username,
			Password: password,
		},
		callbacks...,
	)
}

// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the
func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {

	if err := validateOAuthConfig(oauthConfig); err != nil {
		return nil, err
	}
	if err := validateStringParam(clientID, "clientID"); err != nil {
		return nil, err
	}
	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
		return nil, err
	}
	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
		return nil, err
	}
	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
		return nil, err
	}
	if err := validateStringParam(resource, "resource"); err != nil {
		return nil, err
	}

	return NewServicePrincipalTokenWithSecret(
		oauthConfig,
		clientID,
		resource,
		&ServicePrincipalAuthorizationCodeSecret{
			ClientSecret:      clientSecret,
			AuthorizationCode: authorizationCode,
			RedirectURI:       redirectURI,
		},
		callbacks...,
	)
}

// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
func GetMSIVMEndpoint() (string, error) {
	return msiEndpoint, nil
}

// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
}

// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the specified user assigned identity when creating the token.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
}

func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
		return nil, err
	}
	if err := validateStringParam(resource, "resource"); err != nil {
		return nil, err
	}
	if userAssignedID != nil {
		if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
			return nil, err
		}
	}
	// We set the oauth config token endpoint to be MSI's endpoint
	msiEndpointURL, err := url.Parse(msiEndpoint)
	if err != nil {
		return nil, err
	}

	v := url.Values{}
	v.Set("resource", resource)
	v.Set("api-version", "2018-02-01")
	if userAssignedID != nil {
		v.Set("client_id", *userAssignedID)
	}
	msiEndpointURL.RawQuery = v.Encode()

	spt := &ServicePrincipalToken{
		oauthConfig: OAuthConfig{
			TokenEndpoint: *msiEndpointURL,
		},
		secret:           &ServicePrincipalMSISecret{},
		resource:         resource,
		autoRefresh:      true,
		refreshLock:      &sync.RWMutex{},
		refreshWithin:    defaultRefresh,
		sender:           &http.Client{},
		refreshCallbacks: callbacks,
	}

	if userAssignedID != nil {
		spt.clientID = *userAssignedID
	}

	return spt, nil
}

// internal type that implements TokenRefreshError
type tokenRefreshError struct {
	message string
	resp    *http.Response
}

// Error implements the error interface which is part of the TokenRefreshError interface.
func (tre tokenRefreshError) Error() string {
	return tre.message
}

// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation.
func (tre tokenRefreshError) Response() *http.Response {
	return tre.resp
}

func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
	return tokenRefreshError{message: message, resp: resp}
}

// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (spt *ServicePrincipalToken) EnsureFresh() error {
	return spt.EnsureFreshWithContext(context.Background())
}

// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
	if spt.autoRefresh && spt.token.WillExpireIn(spt.refreshWithin) {
		// take the write lock then check to see if the token was already refreshed
		spt.refreshLock.Lock()
		defer spt.refreshLock.Unlock()
		if spt.token.WillExpireIn(spt.refreshWithin) {
			return spt.refreshInternal(ctx, spt.resource)
		}
	}
	return nil
}

// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
	if spt.refreshCallbacks != nil {
		for _, callback := range spt.refreshCallbacks {
			err := callback(spt.token)
			if err != nil {
				return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
			}
		}
	}
	return nil
}

// Refresh obtains a fresh token for the Service Principal.
// This method is not safe for concurrent use and should be syncrhonized.
func (spt *ServicePrincipalToken) Refresh() error {
	return spt.RefreshWithContext(context.Background())
}

// RefreshWithContext obtains a fresh token for the Service Principal.
// This method is not safe for concurrent use and should be syncrhonized.
func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
	spt.refreshLock.Lock()
	defer spt.refreshLock.Unlock()
	return spt.refreshInternal(ctx, spt.resource)
}

// RefreshExchange refreshes the token, but for a different resource.
// This method is not safe for concurrent use and should be syncrhonized.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
	return spt.RefreshExchangeWithContext(context.Background(), resource)
}

// RefreshExchangeWithContext refreshes the token, but for a different resource.
// This method is not safe for concurrent use and should be syncrhonized.
func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
	spt.refreshLock.Lock()
	defer spt.refreshLock.Unlock()
	return spt.refreshInternal(ctx, resource)
}

func (spt *ServicePrincipalToken) getGrantType() string {
	switch spt.secret.(type) {
	case *ServicePrincipalUsernamePasswordSecret:
		return OAuthGrantTypeUserPass
	case *ServicePrincipalAuthorizationCodeSecret:
		return OAuthGrantTypeAuthorizationCode
	default:
		return OAuthGrantTypeClientCredentials
	}
}

func isIMDS(u url.URL) bool {
	imds, err := url.Parse(msiEndpoint)
	if err != nil {
		return false
	}
	return u.Host == imds.Host && u.Path == imds.Path
}

func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
	req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), nil)
	if err != nil {
		return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
	}
	req = req.WithContext(ctx)
	if !isIMDS(spt.oauthConfig.TokenEndpoint) {
		v := url.Values{}
		v.Set("client_id", spt.clientID)
		v.Set("resource", resource)

		if spt.token.RefreshToken != "" {
			v.Set("grant_type", OAuthGrantTypeRefreshToken)
			v.Set("refresh_token", spt.token.RefreshToken)
		} else {
			v.Set("grant_type", spt.getGrantType())
			err := spt.secret.SetAuthenticationValues(spt, &v)
			if err != nil {
				return err
			}
		}

		s := v.Encode()
		body := ioutil.NopCloser(strings.NewReader(s))
		req.ContentLength = int64(len(s))
		req.Header.Set(contentType, mimeTypeFormPost)
		req.Body = body
	}

	if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok {
		req.Method = http.MethodGet
		req.Header.Set(metadataHeader, "true")
	}

	var resp *http.Response
	if isIMDS(spt.oauthConfig.TokenEndpoint) {
		resp, err = retry(spt.sender, req)
	} else {
		resp, err = spt.sender.Do(req)
	}
	if err != nil {
		return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. Error = '%v'", err), nil)
	}

	defer resp.Body.Close()
	rb, err := ioutil.ReadAll(resp.Body)

	if resp.StatusCode != http.StatusOK {
		if err != nil {
			return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp)
		}
		return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp)
	}

	// for the following error cases don't return a TokenRefreshError. the operation succeeded
	// but some transient failure happened during deserialization. by returning a generic error
	// the retry logic will kick in (we don't retry on TokenRefreshError).

	if err != nil {
		return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
	}
	if len(strings.Trim(string(rb), " ")) == 0 {
		return fmt.Errorf("adal: Empty service principal token received during refresh")
	}
	var token Token
	err = json.Unmarshal(rb, &token)
	if err != nil {
		return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb))
	}

	spt.token = token

	return spt.InvokeRefreshCallbacks(token)
}

func retry(sender Sender, req *http.Request) (resp *http.Response, err error) {
	retries := []int{
		http.StatusRequestTimeout,      // 408
		http.StatusTooManyRequests,     // 429
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout,      // 504
	}
	// Extra retry status codes requered
	retries = append(retries, http.StatusNotFound,
		// all remaining 5xx
		http.StatusNotImplemented,
		http.StatusHTTPVersionNotSupported,
		http.StatusVariantAlsoNegotiates,
		http.StatusInsufficientStorage,
		http.StatusLoopDetected,
		http.StatusNotExtended,
		http.StatusNetworkAuthenticationRequired)

	attempt := 0
	maxAttempts := 5

	for attempt < maxAttempts {
		resp, err = sender.Do(req)
		// retry on temporary network errors, e.g. transient network failures.
		if (err != nil && !isTemporaryNetworkError(err)) || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) {
			return
		}

		if !delay(resp, req.Context().Done()) {
			select {
			case <-time.After(time.Second):
				attempt++
			case <-req.Context().Done():
				err = req.Context().Err()
				return
			}
		}
	}
	return
}

func isTemporaryNetworkError(err error) bool {
	if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
		return true
	}
	return false
}

func containsInt(ints []int, n int) bool {
	for _, i := range ints {
		if i == n {
			return true
		}
	}
	return false
}

func delay(resp *http.Response, cancel <-chan struct{}) bool {
	if resp == nil {
		return false
	}
	retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
	if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
		select {
		case <-time.After(time.Duration(retryAfter) * time.Second):
			return true
		case <-cancel:
			return false
		}
	}
	return false
}

// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
	spt.autoRefresh = autoRefresh
}

// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will
// refresh the token.
func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
	spt.refreshWithin = d
	return
}

// SetSender sets the http.Client used when obtaining the Service Principal token. An
// undecorated http.Client is used by default.
func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }

// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
func (spt *ServicePrincipalToken) OAuthToken() string {
	spt.refreshLock.RLock()
	defer spt.refreshLock.RUnlock()
	return spt.token.OAuthToken()
}

// Token returns a copy of the current token.
func (spt *ServicePrincipalToken) Token() Token {
	spt.refreshLock.RLock()
	defer spt.refreshLock.RUnlock()
	return spt.token
}
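To tie the pieces of token.go together, a hedged sketch of acquiring and refreshing a token with a client secret; the tenant, client ID, secret and resource below are placeholders, not taken from this change.

package main

import (
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Placeholder tenant, client ID, client secret and resource values.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "my-tenant-id")
	if err != nil {
		log.Fatal(err)
	}
	spt, err := adal.NewServicePrincipalToken(*cfg, "my-client-id", "my-client-secret", "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	// EnsureFresh only refreshes when the token is inside the refresh window.
	if err := spt.EnsureFresh(); err != nil {
		log.Fatal(err)
	}
	log.Println("token acquired:", spt.OAuthToken() != "")
}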
259
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
Normal file
259
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
Normal file
|
@ -0,0 +1,259 @@
|
||||||
|
package autorest
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest/adal"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
bearerChallengeHeader = "Www-Authenticate"
|
||||||
|
bearer = "Bearer"
|
||||||
|
tenantID = "tenantID"
|
||||||
|
apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
|
||||||
|
bingAPISdkHeader = "X-BingApis-SDK-Client"
|
||||||
|
golangBingAPISdkHeaderValue = "Go-SDK"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Authorizer is the interface that provides a PrepareDecorator used to supply request
|
||||||
|
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
|
||||||
|
// state of the formed HTTP request.
|
||||||
|
type Authorizer interface {
|
||||||
|
WithAuthorization() PrepareDecorator
|
||||||
|
}
|
||||||
|
|
||||||
|
// NullAuthorizer implements a default, "do nothing" Authorizer.
|
||||||
|
type NullAuthorizer struct{}
|
||||||
|
|
||||||
|
// WithAuthorization returns a PrepareDecorator that does nothing.
|
||||||
|
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
|
||||||
|
return WithNothing()
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyAuthorizer implements API Key authorization.
|
||||||
|
type APIKeyAuthorizer struct {
|
||||||
|
headers map[string]interface{}
|
||||||
|
queryParameters map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
|
||||||
|
func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
|
||||||
|
return NewAPIKeyAuthorizer(headers, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
|
||||||
|
func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
|
||||||
|
return NewAPIKeyAuthorizer(nil, queryParameters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers.
|
||||||
|
func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
|
||||||
|
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters
|
||||||
|
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
||||||
|
return func(p Preparer) Preparer {
|
||||||
|
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
|
||||||
|
type CognitiveServicesAuthorizer struct {
|
||||||
|
subscriptionKey string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCognitiveServicesAuthorizer is
|
||||||
|
func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
|
||||||
|
return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAuthorization is
|
||||||
|
func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
|
||||||
|
headers := make(map[string]interface{})
|
||||||
|
headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
|
||||||
|
headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
|
||||||
|
|
||||||
|
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BearerAuthorizer implements the bearer authorization
|
||||||
|
type BearerAuthorizer struct {
|
||||||
|
tokenProvider adal.OAuthTokenProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider
|
||||||
|
func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
|
||||||
|
return &BearerAuthorizer{tokenProvider: tp}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
||||||
|
// value is "Bearer " followed by the token.
|
||||||
|
//
|
||||||
|
// By default, the token will be automatically refreshed through the Refresher interface.
|
||||||
|
func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
|
||||||
|
return func(p Preparer) Preparer {
|
||||||
|
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||||
|
r, err := p.Prepare(r)
|
||||||
|
if err == nil {
|
||||||
|
// the ordering is important here, prefer RefresherWithContext if available
|
||||||
|
if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
|
||||||
|
err = refresher.EnsureFreshWithContext(r.Context())
|
||||||
|
} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
|
||||||
|
err = refresher.EnsureFresh()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
var resp *http.Response
|
||||||
|
if tokError, ok := err.(adal.TokenRefreshError); ok {
|
||||||
|
resp = tokError.Response()
|
||||||
|
}
|
||||||
|
return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
|
||||||
|
"Failed to refresh the Token for request to %s", r.URL)
|
||||||
|
}
|
||||||
|
return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
|
||||||
|
}
|
||||||
|
return r, err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BearerAuthorizerCallbackFunc is the authentication callback signature.
|
||||||
|
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
|
||||||
|
|
||||||
|
// BearerAuthorizerCallback implements bearer authorization via a callback.
|
||||||
|
type BearerAuthorizerCallback struct {
|
||||||
|
sender Sender
|
||||||
|
callback BearerAuthorizerCallbackFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
|
||||||
|
// is invoked when the HTTP request is submitted.
|
||||||
|
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
|
||||||
|
if sender == nil {
|
||||||
|
sender = &http.Client{}
|
||||||
|
}
|
||||||
|
return &BearerAuthorizerCallback{sender: sender, callback: callback}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
|
||||||
|
// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
|
||||||
|
//
|
||||||
|
// By default, the token will be automatically refreshed through the Refresher interface.
|
||||||
|
func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
|
||||||
|
return func(p Preparer) Preparer {
|
||||||
|
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||||
|
r, err := p.Prepare(r)
|
||||||
|
if err == nil {
|
||||||
|
// make a copy of the request and remove the body as it's not
|
||||||
|
// required and avoids us having to create a copy of it.
|
||||||
|
rCopy := *r
|
||||||
|
removeRequestBody(&rCopy)
|
||||||
|
|
||||||
|
resp, err := bacb.sender.Do(&rCopy)
|
||||||
|
if err == nil && resp.StatusCode == 401 {
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if hasBearerChallenge(resp) {
|
||||||
|
bc, err := newBearerChallenge(resp)
|
||||||
|
if err != nil {
|
||||||
|
return r, err
|
||||||
|
}
|
||||||
|
if bacb.callback != nil {
|
||||||
|
ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
|
||||||
|
if err != nil {
|
||||||
|
return r, err
|
||||||
|
}
|
||||||
|
return Prepare(r, ba.WithAuthorization())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return r, err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// returns true if the HTTP response contains a bearer challenge
|
||||||
|
func hasBearerChallenge(resp *http.Response) bool {
|
||||||
|
authHeader := resp.Header.Get(bearerChallengeHeader)
|
||||||
|
if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type bearerChallenge struct {
|
||||||
|
values map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
|
||||||
|
challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
|
||||||
|
trimmedChallenge := challenge[len(bearer)+1:]
|
||||||
|
|
||||||
|
// challenge is a set of key=value pairs that are comma delimited
|
||||||
|
pairs := strings.Split(trimmedChallenge, ",")
|
||||||
|
if len(pairs) < 1 {
|
||||||
|
err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
|
||||||
|
return bc, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bc.values = make(map[string]string)
|
||||||
|
for i := range pairs {
|
||||||
|
trimmedPair := strings.TrimSpace(pairs[i])
|
||||||
|
pair := strings.Split(trimmedPair, "=")
|
||||||
|
if len(pair) == 2 {
|
||||||
|
// remove the enclosing quotes
|
||||||
|
key := strings.Trim(pair[0], "\"")
|
||||||
|
value := strings.Trim(pair[1], "\"")
|
||||||
|
|
||||||
|
switch key {
|
||||||
|
case "authorization", "authorization_uri":
|
||||||
|
// strip the tenant ID from the authorization URL
|
||||||
|
asURL, err := url.Parse(value)
|
||||||
|
if err != nil {
|
||||||
|
return bc, err
|
||||||
|
}
|
||||||
|
bc.values[tenantID] = asURL.Path[1:]
|
||||||
|
default:
|
||||||
|
bc.values[key] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return bc, err
|
||||||
|
}
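For reference, a minimal standalone sketch of the challenge shape newBearerChallenge parses; the header value, tenant GUID and resource below are illustrative placeholders, not values taken from this change.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Example WWW-Authenticate value as returned with a 401 (placeholder GUID and resource).
	challenge := `Bearer authorization_uri="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000", resource="https://management.azure.com/"`

	values := map[string]string{}
	for _, pair := range strings.Split(challenge[len("Bearer")+1:], ",") {
		kv := strings.SplitN(strings.TrimSpace(pair), "=", 2)
		if len(kv) != 2 {
			continue
		}
		key := strings.Trim(kv[0], `"`)
		value := strings.Trim(kv[1], `"`)
		if key == "authorization" || key == "authorization_uri" {
			// the tenant ID is the path component of the authorization URL
			if u, err := url.Parse(value); err == nil {
				values["tenantID"] = u.Path[1:]
			}
			continue
		}
		values[key] = value
	}
	fmt.Println(values["tenantID"], values["resource"])
}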
|
||||||
|
|
||||||
|
// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
|
||||||
|
type EventGridKeyAuthorizer struct {
|
||||||
|
topicKey string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
|
||||||
|
// with the specified topic key.
|
||||||
|
func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
|
||||||
|
return EventGridKeyAuthorizer{topicKey: topicKey}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
|
||||||
|
func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
|
||||||
|
headers := map[string]interface{}{
|
||||||
|
"aeg-sas-key": egta.topicKey,
|
||||||
|
}
|
||||||
|
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
|
||||||
|
}
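A hedged usage sketch: wiring the new Event Grid authorizer into an autorest.Client so every request carries the aeg-sas-key header. The user agent and topic key are placeholders; the authorizer is assumed to live in the autorest package alongside the other authorizers above.

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Placeholder key; in practice this comes from the Event Grid topic's access keys.
	client := autorest.NewClientWithUserAgent("example-agent/0.0.1")
	client.Authorizer = autorest.NewEventGridKeyAuthorizer("topic-key-placeholder")
	fmt.Printf("%T\n", client.Authorizer)
}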
|
35
vendor/github.com/Azure/go-autorest/autorest/autorest.go
generated
vendored
|
@ -57,7 +57,22 @@ generated clients, see the Client described below.
|
||||||
*/
|
*/
|
||||||
package autorest
|
package autorest
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
@ -73,6 +88,9 @@ const (
|
||||||
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
|
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
|
||||||
// and false otherwise.
|
// and false otherwise.
|
||||||
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
|
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
|
||||||
|
if resp == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
return containsInt(codes, resp.StatusCode)
|
return containsInt(codes, resp.StatusCode)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -113,3 +131,20 @@ func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Reque
|
||||||
|
|
||||||
return req, nil
|
return req, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response.
|
||||||
|
func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) {
|
||||||
|
location := GetLocation(resp)
|
||||||
|
if location == "" {
|
||||||
|
return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling")
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := Prepare((&http.Request{}).WithContext(ctx),
|
||||||
|
AsGet(),
|
||||||
|
WithBaseURL(location))
|
||||||
|
if err != nil {
|
||||||
|
return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location)
|
||||||
|
}
|
||||||
|
|
||||||
|
return req, nil
|
||||||
|
}
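A small sketch of how a caller might use the new context-aware helper; the timeout value is illustrative and pollOnce is a hypothetical wrapper, not part of this change.

package example

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

// pollOnce builds a poll request from a response carrying a Location header,
// tied to ctx so cancellation propagates, and sends it through the client.
func pollOnce(ctx context.Context, client autorest.Client, resp *http.Response) (*http.Response, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second) // illustrative timeout
	defer cancel()

	req, err := autorest.NewPollingRequestWithContext(ctx, resp)
	if err != nil {
		return nil, err
	}
	return autorest.SendWithSender(client, req)
}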
|
||||||
|
|
339
vendor/github.com/Azure/go-autorest/autorest/azure/async.go
generated
vendored
|
@ -1,7 +1,23 @@
|
||||||
package azure
|
package azure
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -17,18 +33,190 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
methodDelete = "DELETE"
|
|
||||||
methodPatch = "PATCH"
|
|
||||||
methodPost = "POST"
|
|
||||||
methodPut = "PUT"
|
|
||||||
methodGet = "GET"
|
|
||||||
|
|
||||||
operationInProgress string = "InProgress"
|
operationInProgress string = "InProgress"
|
||||||
operationCanceled string = "Canceled"
|
operationCanceled string = "Canceled"
|
||||||
operationFailed string = "Failed"
|
operationFailed string = "Failed"
|
||||||
operationSucceeded string = "Succeeded"
|
operationSucceeded string = "Succeeded"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK}
|
||||||
|
|
||||||
|
// Future provides a mechanism to access the status and results of an asynchronous request.
|
||||||
|
// Since futures are stateful they should be passed by value to avoid race conditions.
|
||||||
|
type Future struct {
|
||||||
|
req *http.Request
|
||||||
|
resp *http.Response
|
||||||
|
ps pollingState
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFuture returns a new Future object initialized with the specified request.
|
||||||
|
func NewFuture(req *http.Request) Future {
|
||||||
|
return Future{req: req}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Response returns the last HTTP response or nil if there isn't one.
|
||||||
|
func (f Future) Response() *http.Response {
|
||||||
|
return f.resp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns the last status message of the operation.
|
||||||
|
func (f Future) Status() string {
|
||||||
|
if f.ps.State == "" {
|
||||||
|
return "Unknown"
|
||||||
|
}
|
||||||
|
return f.ps.State
|
||||||
|
}
|
||||||
|
|
||||||
|
// PollingMethod returns the method used to monitor the status of the asynchronous operation.
|
||||||
|
func (f Future) PollingMethod() PollingMethodType {
|
||||||
|
return f.ps.PollingMethod
|
||||||
|
}
|
||||||
|
|
||||||
|
// Done queries the service to see if the operation has completed.
|
||||||
|
func (f *Future) Done(sender autorest.Sender) (bool, error) {
|
||||||
|
// exit early if this future has terminated
|
||||||
|
if f.ps.hasTerminated() {
|
||||||
|
return true, f.errorInfo()
|
||||||
|
}
|
||||||
|
resp, err := sender.Do(f.req)
|
||||||
|
f.resp = resp
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) {
|
||||||
|
// check response body for error content
|
||||||
|
if resp.Body != nil {
|
||||||
|
type respErr struct {
|
||||||
|
ServiceError ServiceError `json:"error"`
|
||||||
|
}
|
||||||
|
re := respErr{}
|
||||||
|
|
||||||
|
defer resp.Body.Close()
|
||||||
|
b, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(b, &re)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return false, re.ServiceError
|
||||||
|
}
|
||||||
|
|
||||||
|
// try to return something meaningful
|
||||||
|
return false, ServiceError{
|
||||||
|
Code: fmt.Sprintf("%v", resp.StatusCode),
|
||||||
|
Message: resp.Status,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = updatePollingState(resp, &f.ps)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.ps.hasTerminated() {
|
||||||
|
return true, f.errorInfo()
|
||||||
|
}
|
||||||
|
|
||||||
|
f.req, err = newPollingRequest(f.ps)
|
||||||
|
return false, err
|
||||||
|
}
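A hedged sketch of the manual polling loop the new Future type enables (WaitForCompletion below wraps the same idea); the fallback delay is a placeholder and waitManually is a hypothetical helper.

package example

import (
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// waitManually polls a future until it reports done, honoring Retry-After when present.
func waitManually(f azure.Future, sender autorest.Sender) error {
	for {
		done, err := f.Done(sender)
		if err != nil {
			return err
		}
		if done {
			fmt.Println("final state:", f.Status())
			return nil
		}
		if delay, ok := f.GetPollingDelay(); ok {
			time.Sleep(delay) // service-provided Retry-After
		} else {
			time.Sleep(10 * time.Second) // placeholder fallback interval
		}
	}
}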
|
||||||
|
|
||||||
|
// GetPollingDelay returns a duration the application should wait before checking
|
||||||
|
// the status of the asynchronous request and true; this value is returned from
|
||||||
|
// the service via the Retry-After response header. If the header wasn't returned
|
||||||
|
// then the function returns the zero-value time.Duration and false.
|
||||||
|
func (f Future) GetPollingDelay() (time.Duration, bool) {
|
||||||
|
if f.resp == nil {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
retry := f.resp.Header.Get(autorest.HeaderRetryAfter)
|
||||||
|
if retry == "" {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
d, err := time.ParseDuration(retry + "s")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return d, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForCompletion will return when one of the following conditions is met: the long
|
||||||
|
// running operation has completed, the provided context is cancelled, or the client's
|
||||||
|
// polling duration has been exceeded. It will retry failed polling attempts based on
|
||||||
|
// the retry value defined in the client up to the maximum retry attempts.
|
||||||
|
func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error {
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, client.PollingDuration)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
done, err := f.Done(client)
|
||||||
|
for attempts := 0; !done; done, err = f.Done(client) {
|
||||||
|
if attempts >= client.RetryAttempts {
|
||||||
|
return autorest.NewErrorWithError(err, "azure", "WaitForCompletion", f.resp, "the number of retries has been exceeded")
|
||||||
|
}
|
||||||
|
// we want delayAttempt to be zero in the non-error case so
|
||||||
|
// that DelayForBackoff doesn't perform exponential back-off
|
||||||
|
var delayAttempt int
|
||||||
|
var delay time.Duration
|
||||||
|
if err == nil {
|
||||||
|
// check for Retry-After delay, if not present use the client's polling delay
|
||||||
|
var ok bool
|
||||||
|
delay, ok = f.GetPollingDelay()
|
||||||
|
if !ok {
|
||||||
|
delay = client.PollingDelay
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// there was an error polling for status so perform exponential
|
||||||
|
// back-off based on the number of attempts using the client's retry
|
||||||
|
// duration. update attempts after delayAttempt to avoid off-by-one.
|
||||||
|
delayAttempt = attempts
|
||||||
|
delay = client.RetryDuration
|
||||||
|
attempts++
|
||||||
|
}
|
||||||
|
// wait until the delay elapses or the context is cancelled
|
||||||
|
delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done())
|
||||||
|
if !delayElapsed {
|
||||||
|
return autorest.NewErrorWithError(ctx.Err(), "azure", "WaitForCompletion", f.resp, "context has been cancelled")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
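Typical call pattern for the helper above, sketched with placeholder client settings; real code would obtain the Future from a generated client's long-running-operation call.

package example

import (
	"context"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// await blocks until the long-running operation behind f finishes or the
// client's polling budget runs out. The durations are illustrative only.
func await(ctx context.Context, f azure.Future) error {
	client := autorest.NewClientWithUserAgent("example-agent/0.0.1")
	client.PollingDelay = 10 * time.Second
	client.PollingDuration = 15 * time.Minute
	client.RetryAttempts = 3
	client.RetryDuration = 5 * time.Second
	return f.WaitForCompletion(ctx, client)
}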
|
||||||
|
|
||||||
|
// if the operation failed the polling state will contain
|
||||||
|
// error information and implements the error interface
|
||||||
|
func (f *Future) errorInfo() error {
|
||||||
|
if !f.ps.hasSucceeded() {
|
||||||
|
return f.ps
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON implements the json.Marshaler interface.
|
||||||
|
func (f Future) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(&f.ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||||
|
func (f *Future) UnmarshalJSON(data []byte) error {
|
||||||
|
err := json.Unmarshal(data, &f.ps)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.req, err = newPollingRequest(f.ps)
|
||||||
|
return err
|
||||||
|
}
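Because the polling state now round-trips through JSON, a caller can persist an in-flight operation and resume it later. A sketch; the file path and helper names are placeholders.

package example

import (
	"encoding/json"
	"io/ioutil"

	"github.com/Azure/go-autorest/autorest/azure"
)

// saveFuture writes the future's polling state to disk so another process can resume it.
func saveFuture(f azure.Future, path string) error {
	b, err := json.Marshal(f) // uses Future.MarshalJSON, i.e. the pollingState
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, b, 0600)
}

// loadFuture restores a future; UnmarshalJSON also rebuilds the polling request.
func loadFuture(path string) (azure.Future, error) {
	var f azure.Future
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return f, err
	}
	err = json.Unmarshal(b, &f)
	return f, err
}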
|
||||||
|
|
||||||
|
// PollingURL returns the URL used for retrieving the status of the long-running operation.
|
||||||
|
// For LROs that use the Location header the final URL value is used to retrieve the result.
|
||||||
|
func (f Future) PollingURL() string {
|
||||||
|
return f.ps.URI
|
||||||
|
}
|
||||||
|
|
||||||
// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
|
// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
|
||||||
// long-running operation. It will delay between requests for the duration specified in the
|
// long-running operation. It will delay between requests for the duration specified in the
|
||||||
// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
|
// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
|
||||||
|
@ -40,8 +228,7 @@ func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK}
|
if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) {
|
||||||
if !autorest.ResponseHasStatusCode(resp, pollingCodes...) {
|
|
||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -58,10 +245,11 @@ func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err = newPollingRequest(resp, ps)
|
r, err = newPollingRequest(ps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
r = r.WithContext(resp.Request.Context())
|
||||||
|
|
||||||
delay = autorest.GetRetryAfter(resp, delay)
|
delay = autorest.GetRetryAfter(resp, delay)
|
||||||
resp, err = autorest.SendWithSender(s, r,
|
resp, err = autorest.SendWithSender(s, r,
|
||||||
|
@ -78,20 +266,15 @@ func getAsyncOperation(resp *http.Response) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasSucceeded(state string) bool {
|
func hasSucceeded(state string) bool {
|
||||||
return state == operationSucceeded
|
return strings.EqualFold(state, operationSucceeded)
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasTerminated(state string) bool {
|
func hasTerminated(state string) bool {
|
||||||
switch state {
|
return strings.EqualFold(state, operationCanceled) || strings.EqualFold(state, operationFailed) || strings.EqualFold(state, operationSucceeded)
|
||||||
case operationCanceled, operationFailed, operationSucceeded:
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasFailed(state string) bool {
|
func hasFailed(state string) bool {
|
||||||
return state == operationFailed
|
return strings.EqualFold(state, operationFailed)
|
||||||
}
|
}
|
||||||
|
|
||||||
type provisioningTracker interface {
|
type provisioningTracker interface {
|
||||||
|
@ -149,39 +332,50 @@ func (ps provisioningStatus) hasTerminated() bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ps provisioningStatus) hasProvisioningError() bool {
|
func (ps provisioningStatus) hasProvisioningError() bool {
|
||||||
return ps.ProvisioningError != ServiceError{}
|
// code and message are required fields so only check them
|
||||||
|
return len(ps.ProvisioningError.Code) > 0 ||
|
||||||
|
len(ps.ProvisioningError.Message) > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
type pollingResponseFormat string
|
// PollingMethodType defines a type used for enumerating polling mechanisms.
|
||||||
|
type PollingMethodType string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
usesOperationResponse pollingResponseFormat = "OperationResponse"
|
// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
|
||||||
usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
|
PollingAsyncOperation PollingMethodType = "AsyncOperation"
|
||||||
formatIsUnknown pollingResponseFormat = ""
|
|
||||||
|
// PollingLocation indicates the polling method uses the Location header.
|
||||||
|
PollingLocation PollingMethodType = "Location"
|
||||||
|
|
||||||
|
// PollingUnknown indicates an unknown polling method and is the default value.
|
||||||
|
PollingUnknown PollingMethodType = ""
|
||||||
)
|
)
|
||||||
|
|
||||||
type pollingState struct {
|
type pollingState struct {
|
||||||
responseFormat pollingResponseFormat
|
PollingMethod PollingMethodType `json:"pollingMethod"`
|
||||||
uri string
|
URI string `json:"uri"`
|
||||||
state string
|
State string `json:"state"`
|
||||||
code string
|
ServiceError *ServiceError `json:"error,omitempty"`
|
||||||
message string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ps pollingState) hasSucceeded() bool {
|
func (ps pollingState) hasSucceeded() bool {
|
||||||
return hasSucceeded(ps.state)
|
return hasSucceeded(ps.State)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ps pollingState) hasTerminated() bool {
|
func (ps pollingState) hasTerminated() bool {
|
||||||
return hasTerminated(ps.state)
|
return hasTerminated(ps.State)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ps pollingState) hasFailed() bool {
|
func (ps pollingState) hasFailed() bool {
|
||||||
return hasFailed(ps.state)
|
return hasFailed(ps.State)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ps pollingState) Error() string {
|
func (ps pollingState) Error() string {
|
||||||
return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
|
s := fmt.Sprintf("Long running operation terminated with status '%s'", ps.State)
|
||||||
|
if ps.ServiceError != nil {
|
||||||
|
s = fmt.Sprintf("%s: %+v", s, *ps.ServiceError)
|
||||||
|
}
|
||||||
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
// updatePollingState maps the operation status -- retrieved from either a provisioningState
|
// updatePollingState maps the operation status -- retrieved from either a provisioningState
|
||||||
|
@ -196,7 +390,7 @@ func updatePollingState(resp *http.Response, ps *pollingState) error {
|
||||||
// -- The first response will always be a provisioningStatus response; only the polling requests,
|
// -- The first response will always be a provisioningStatus response; only the polling requests,
|
||||||
// depending on the header returned, may be something otherwise.
|
// depending on the header returned, may be something otherwise.
|
||||||
var pt provisioningTracker
|
var pt provisioningTracker
|
||||||
if ps.responseFormat == usesOperationResponse {
|
if ps.PollingMethod == PollingAsyncOperation {
|
||||||
pt = &operationResource{}
|
pt = &operationResource{}
|
||||||
} else {
|
} else {
|
||||||
pt = &provisioningStatus{}
|
pt = &provisioningStatus{}
|
||||||
|
@ -204,30 +398,30 @@ func updatePollingState(resp *http.Response, ps *pollingState) error {
|
||||||
|
|
||||||
// If this is the first request (that is, the polling response shape is unknown), determine how
|
// If this is the first request (that is, the polling response shape is unknown), determine how
|
||||||
// to poll and what to expect
|
// to poll and what to expect
|
||||||
if ps.responseFormat == formatIsUnknown {
|
if ps.PollingMethod == PollingUnknown {
|
||||||
req := resp.Request
|
req := resp.Request
|
||||||
if req == nil {
|
if req == nil {
|
||||||
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
|
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prefer the Azure-AsyncOperation header
|
// Prefer the Azure-AsyncOperation header
|
||||||
ps.uri = getAsyncOperation(resp)
|
ps.URI = getAsyncOperation(resp)
|
||||||
if ps.uri != "" {
|
if ps.URI != "" {
|
||||||
ps.responseFormat = usesOperationResponse
|
ps.PollingMethod = PollingAsyncOperation
|
||||||
} else {
|
} else {
|
||||||
ps.responseFormat = usesProvisioningStatus
|
ps.PollingMethod = PollingLocation
|
||||||
}
|
}
|
||||||
|
|
||||||
// Else, use the Location header
|
// Else, use the Location header
|
||||||
if ps.uri == "" {
|
if ps.URI == "" {
|
||||||
ps.uri = autorest.GetLocation(resp)
|
ps.URI = autorest.GetLocation(resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Lastly, requests against an existing resource, use the last request URI
|
// Lastly, requests against an existing resource, use the last request URI
|
||||||
if ps.uri == "" {
|
if ps.URI == "" {
|
||||||
m := strings.ToUpper(req.Method)
|
m := strings.ToUpper(req.Method)
|
||||||
if m == methodPatch || m == methodPut || m == methodGet {
|
if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet {
|
||||||
ps.uri = req.URL.String()
|
ps.URI = req.URL.String()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -248,23 +442,23 @@ func updatePollingState(resp *http.Response, ps *pollingState) error {
|
||||||
// -- Unknown states are per-service inprogress states
|
// -- Unknown states are per-service inprogress states
|
||||||
// -- Otherwise, infer state from HTTP status code
|
// -- Otherwise, infer state from HTTP status code
|
||||||
if pt.hasTerminated() {
|
if pt.hasTerminated() {
|
||||||
ps.state = pt.state()
|
ps.State = pt.state()
|
||||||
} else if pt.state() != "" {
|
} else if pt.state() != "" {
|
||||||
ps.state = operationInProgress
|
ps.State = operationInProgress
|
||||||
} else {
|
} else {
|
||||||
switch resp.StatusCode {
|
switch resp.StatusCode {
|
||||||
case http.StatusAccepted:
|
case http.StatusAccepted:
|
||||||
ps.state = operationInProgress
|
ps.State = operationInProgress
|
||||||
|
|
||||||
case http.StatusNoContent, http.StatusCreated, http.StatusOK:
|
case http.StatusNoContent, http.StatusCreated, http.StatusOK:
|
||||||
ps.state = operationSucceeded
|
ps.State = operationSucceeded
|
||||||
|
|
||||||
default:
|
default:
|
||||||
ps.state = operationFailed
|
ps.State = operationFailed
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if ps.state == operationInProgress && ps.uri == "" {
|
if strings.EqualFold(ps.State, operationInProgress) && ps.URI == "" {
|
||||||
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
|
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -273,36 +467,45 @@ func updatePollingState(resp *http.Response, ps *pollingState) error {
|
||||||
// -- Response
|
// -- Response
|
||||||
// -- Otherwise, Unknown
|
// -- Otherwise, Unknown
|
||||||
if ps.hasFailed() {
|
if ps.hasFailed() {
|
||||||
if ps.responseFormat == usesOperationResponse {
|
if or, ok := pt.(*operationResource); ok {
|
||||||
or := pt.(*operationResource)
|
ps.ServiceError = &or.OperationError
|
||||||
ps.code = or.OperationError.Code
|
} else if p, ok := pt.(*provisioningStatus); ok && p.hasProvisioningError() {
|
||||||
ps.message = or.OperationError.Message
|
ps.ServiceError = &p.ProvisioningError
|
||||||
} else {
|
} else {
|
||||||
p := pt.(*provisioningStatus)
|
ps.ServiceError = &ServiceError{
|
||||||
if p.hasProvisioningError() {
|
Code: "Unknown",
|
||||||
ps.code = p.ProvisioningError.Code
|
Message: "None",
|
||||||
ps.message = p.ProvisioningError.Message
|
|
||||||
} else {
|
|
||||||
ps.code = "Unknown"
|
|
||||||
ps.message = "None"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) {
|
func newPollingRequest(ps pollingState) (*http.Request, error) {
|
||||||
req := resp.Request
|
reqPoll, err := autorest.Prepare(&http.Request{},
|
||||||
if req == nil {
|
|
||||||
return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing")
|
|
||||||
}
|
|
||||||
|
|
||||||
reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel},
|
|
||||||
autorest.AsGet(),
|
autorest.AsGet(),
|
||||||
autorest.WithBaseURL(ps.uri))
|
autorest.WithBaseURL(ps.URI))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri)
|
return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.URI)
|
||||||
}
|
}
|
||||||
|
|
||||||
return reqPoll, nil
|
return reqPoll, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
|
||||||
|
type AsyncOpIncompleteError struct {
|
||||||
|
// FutureType is the name of the type composed of a azure.Future.
|
||||||
|
FutureType string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns an error message including the originating type name of the error.
|
||||||
|
func (e AsyncOpIncompleteError) Error() string {
|
||||||
|
return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
|
||||||
|
func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
|
||||||
|
return AsyncOpIncompleteError{
|
||||||
|
FutureType: futureType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
153
vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
generated
vendored
|
@ -1,16 +1,29 @@
|
||||||
/*
|
// Package azure provides Azure-specific implementations used with AutoRest.
|
||||||
Package azure provides Azure-specific implementations used with AutoRest.
|
// See the included examples for more detail.
|
||||||
|
|
||||||
See the included examples for more detail.
|
|
||||||
*/
|
|
||||||
package azure
|
package azure
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/Azure/go-autorest/autorest"
|
"github.com/Azure/go-autorest/autorest"
|
||||||
)
|
)
|
||||||
|
@ -29,21 +42,88 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// ServiceError encapsulates the error response from an Azure service.
|
// ServiceError encapsulates the error response from an Azure service.
|
||||||
|
// It adheres to the OData v4 specification for error responses.
|
||||||
type ServiceError struct {
|
type ServiceError struct {
|
||||||
Code string `json:"code"`
|
Code string `json:"code"`
|
||||||
Message string `json:"message"`
|
Message string `json:"message"`
|
||||||
Details *[]interface{} `json:"details"`
|
Target *string `json:"target"`
|
||||||
|
Details []map[string]interface{} `json:"details"`
|
||||||
|
InnerError map[string]interface{} `json:"innererror"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (se ServiceError) Error() string {
|
func (se ServiceError) Error() string {
|
||||||
if se.Details != nil {
|
result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
|
||||||
d, err := json.Marshal(*(se.Details))
|
|
||||||
if err != nil {
|
if se.Target != nil {
|
||||||
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details)
|
result += fmt.Sprintf(" Target=%q", *se.Target)
|
||||||
}
|
|
||||||
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d))
|
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
|
|
||||||
|
if se.Details != nil {
|
||||||
|
d, err := json.Marshal(se.Details)
|
||||||
|
if err != nil {
|
||||||
|
result += fmt.Sprintf(" Details=%v", se.Details)
|
||||||
|
}
|
||||||
|
result += fmt.Sprintf(" Details=%v", string(d))
|
||||||
|
}
|
||||||
|
|
||||||
|
if se.InnerError != nil {
|
||||||
|
d, err := json.Marshal(se.InnerError)
|
||||||
|
if err != nil {
|
||||||
|
result += fmt.Sprintf(" InnerError=%v", se.InnerError)
|
||||||
|
}
|
||||||
|
result += fmt.Sprintf(" InnerError=%v", string(d))
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
|
||||||
|
func (se *ServiceError) UnmarshalJSON(b []byte) error {
|
||||||
|
// per the OData v4 spec the details field must be an array of JSON objects.
|
||||||
|
// unfortunately not all services adhere to the spec and just return a single
|
||||||
|
// object instead of an array with one object. so we have to perform some
|
||||||
|
// shenanigans to accommodate both cases.
|
||||||
|
// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
|
||||||
|
|
||||||
|
type serviceError1 struct {
|
||||||
|
Code string `json:"code"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
Target *string `json:"target"`
|
||||||
|
Details []map[string]interface{} `json:"details"`
|
||||||
|
InnerError map[string]interface{} `json:"innererror"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type serviceError2 struct {
|
||||||
|
Code string `json:"code"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
Target *string `json:"target"`
|
||||||
|
Details map[string]interface{} `json:"details"`
|
||||||
|
InnerError map[string]interface{} `json:"innererror"`
|
||||||
|
}
|
||||||
|
|
||||||
|
se1 := serviceError1{}
|
||||||
|
err := json.Unmarshal(b, &se1)
|
||||||
|
if err == nil {
|
||||||
|
se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
se2 := serviceError2{}
|
||||||
|
err = json.Unmarshal(b, &se2)
|
||||||
|
if err == nil {
|
||||||
|
se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError)
|
||||||
|
se.Details = append(se.Details, se2.Details)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}) {
|
||||||
|
se.Code = code
|
||||||
|
se.Message = message
|
||||||
|
se.Target = target
|
||||||
|
se.Details = details
|
||||||
|
se.InnerError = inner
|
||||||
}
|
}
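A quick sketch of why the custom UnmarshalJSON above exists: the same ServiceError type now accepts both the spec-compliant array form of "details" and the single-object form some services return. The payloads are illustrative only.

package example

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func decodeBoth() {
	// details as an array (OData v4) and as a bare object (non-conforming service).
	conforming := []byte(`{"code":"BadRequest","message":"oops","details":[{"target":"prop"}]}`)
	lenient := []byte(`{"code":"BadRequest","message":"oops","details":{"target":"prop"}}`)

	var a, b azure.ServiceError
	fmt.Println(json.Unmarshal(conforming, &a), a.Error())
	fmt.Println(json.Unmarshal(lenient, &b), b.Error())
}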
|
||||||
|
|
||||||
// RequestError describes an error response returned by Azure service.
|
// RequestError describes an error response returned by Azure service.
|
||||||
|
@ -69,6 +149,41 @@ func IsAzureError(e error) bool {
|
||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Resource contains details about an Azure resource.
|
||||||
|
type Resource struct {
|
||||||
|
SubscriptionID string
|
||||||
|
ResourceGroup string
|
||||||
|
Provider string
|
||||||
|
ResourceType string
|
||||||
|
ResourceName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseResourceID parses a resource ID into a ResourceDetails struct.
|
||||||
|
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
|
||||||
|
func ParseResourceID(resourceID string) (Resource, error) {
|
||||||
|
|
||||||
|
const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
|
||||||
|
resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
|
||||||
|
match := resourceIDPattern.FindStringSubmatch(resourceID)
|
||||||
|
|
||||||
|
if len(match) == 0 {
|
||||||
|
return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
|
||||||
|
}
|
||||||
|
|
||||||
|
v := strings.Split(match[5], "/")
|
||||||
|
resourceName := v[len(v)-1]
|
||||||
|
|
||||||
|
result := Resource{
|
||||||
|
SubscriptionID: match[1],
|
||||||
|
ResourceGroup: match[2],
|
||||||
|
Provider: match[3],
|
||||||
|
ResourceType: match[4],
|
||||||
|
ResourceName: resourceName,
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
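Example use of the new parser; the resource ID below is a made-up value in the documented ARM format.

package example

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func splitID() {
	id := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup/providers/Microsoft.Storage/storageAccounts/myAccount"
	r, err := azure.ParseResourceID(id)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Prints: myGroup Microsoft.Storage storageAccounts myAccount
	fmt.Println(r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
}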
|
||||||
|
|
||||||
// NewErrorWithError creates a new Error conforming object from the
|
// NewErrorWithError creates a new Error conforming object from the
|
||||||
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
|
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
|
||||||
// if resp is nil), message, and original error. message is treated as a format
|
// if resp is nil), message, and original error. message is treated as a format
|
||||||
|
@ -165,7 +280,13 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
|
||||||
if decodeErr != nil {
|
if decodeErr != nil {
|
||||||
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
|
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
|
||||||
} else if e.ServiceError == nil {
|
} else if e.ServiceError == nil {
|
||||||
e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"}
|
// Check if error is unwrapped ServiceError
|
||||||
|
if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" {
|
||||||
|
e.ServiceError = &ServiceError{
|
||||||
|
Code: "Unknown",
|
||||||
|
Message: "Unknown service error",
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
e.RequestID = ExtractRequestID(resp)
|
e.RequestID = ExtractRequestID(resp)
|
||||||
|
|
13
vendor/github.com/Azure/go-autorest/autorest/azure/config.go
generated
vendored
|
@ -1,13 +0,0 @@
|
||||||
package azure
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OAuthConfig represents the endpoints needed
|
|
||||||
// in OAuth operations
|
|
||||||
type OAuthConfig struct {
|
|
||||||
AuthorizeEndpoint url.URL
|
|
||||||
TokenEndpoint url.URL
|
|
||||||
DeviceCodeEndpoint url.URL
|
|
||||||
}
|
|
96
vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
generated
vendored
|
@ -1,14 +1,30 @@
|
||||||
package azure
|
package azure
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
|
||||||
activeDirectoryAPIVersion = "1.0"
|
// to be used while populating the Azure Environment.
|
||||||
)
|
const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
|
||||||
|
|
||||||
var environments = map[string]Environment{
|
var environments = map[string]Environment{
|
||||||
"AZURECHINACLOUD": ChinaCloud,
|
"AZURECHINACLOUD": ChinaCloud,
|
||||||
|
@ -28,6 +44,8 @@ type Environment struct {
|
||||||
GalleryEndpoint string `json:"galleryEndpoint"`
|
GalleryEndpoint string `json:"galleryEndpoint"`
|
||||||
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
|
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
|
||||||
GraphEndpoint string `json:"graphEndpoint"`
|
GraphEndpoint string `json:"graphEndpoint"`
|
||||||
|
ServiceBusEndpoint string `json:"serviceBusEndpoint"`
|
||||||
|
BatchManagementEndpoint string `json:"batchManagementEndpoint"`
|
||||||
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
|
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
|
||||||
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
|
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
|
||||||
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
|
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
|
||||||
|
@ -36,6 +54,7 @@ type Environment struct {
|
||||||
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
|
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
|
||||||
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
|
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
|
||||||
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
||||||
|
TokenAudience string `json:"tokenAudience"`
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -50,14 +69,17 @@ var (
|
||||||
GalleryEndpoint: "https://gallery.azure.com/",
|
GalleryEndpoint: "https://gallery.azure.com/",
|
||||||
KeyVaultEndpoint: "https://vault.azure.net/",
|
KeyVaultEndpoint: "https://vault.azure.net/",
|
||||||
GraphEndpoint: "https://graph.windows.net/",
|
GraphEndpoint: "https://graph.windows.net/",
|
||||||
|
ServiceBusEndpoint: "https://servicebus.windows.net/",
|
||||||
|
BatchManagementEndpoint: "https://batch.core.windows.net/",
|
||||||
StorageEndpointSuffix: "core.windows.net",
|
StorageEndpointSuffix: "core.windows.net",
|
||||||
SQLDatabaseDNSSuffix: "database.windows.net",
|
SQLDatabaseDNSSuffix: "database.windows.net",
|
||||||
TrafficManagerDNSSuffix: "trafficmanager.net",
|
TrafficManagerDNSSuffix: "trafficmanager.net",
|
||||||
KeyVaultDNSSuffix: "vault.azure.net",
|
KeyVaultDNSSuffix: "vault.azure.net",
|
||||||
ServiceBusEndpointSuffix: "servicebus.azure.com",
|
ServiceBusEndpointSuffix: "servicebus.windows.net",
|
||||||
ServiceManagementVMDNSSuffix: "cloudapp.net",
|
ServiceManagementVMDNSSuffix: "cloudapp.net",
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
|
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||||
|
TokenAudience: "https://management.azure.com/",
|
||||||
}
|
}
|
||||||
|
|
||||||
// USGovernmentCloud is the cloud environment for the US Government
|
// USGovernmentCloud is the cloud environment for the US Government
|
||||||
|
@ -67,10 +89,12 @@ var (
|
||||||
PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
|
PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
|
||||||
ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
|
ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
|
||||||
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
|
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
|
||||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
|
ActiveDirectoryEndpoint: "https://login.microsoftonline.us/",
|
||||||
GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
|
GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
|
||||||
KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
|
KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
|
||||||
GraphEndpoint: "https://graph.usgovcloudapi.net/",
|
GraphEndpoint: "https://graph.windows.net/",
|
||||||
|
ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/",
|
||||||
|
BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/",
|
||||||
StorageEndpointSuffix: "core.usgovcloudapi.net",
|
StorageEndpointSuffix: "core.usgovcloudapi.net",
|
||||||
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
|
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
|
||||||
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
|
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
|
||||||
|
@ -79,6 +103,7 @@ var (
|
||||||
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
|
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
|
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||||
|
TokenAudience: "https://management.usgovcloudapi.net/",
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChinaCloud is the cloud environment operated in China
|
// ChinaCloud is the cloud environment operated in China
|
||||||
|
@ -92,14 +117,17 @@ var (
|
||||||
GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
|
GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
|
||||||
KeyVaultEndpoint: "https://vault.azure.cn/",
|
KeyVaultEndpoint: "https://vault.azure.cn/",
|
||||||
GraphEndpoint: "https://graph.chinacloudapi.cn/",
|
GraphEndpoint: "https://graph.chinacloudapi.cn/",
|
||||||
|
ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/",
|
||||||
|
BatchManagementEndpoint: "https://batch.chinacloudapi.cn/",
|
||||||
StorageEndpointSuffix: "core.chinacloudapi.cn",
|
StorageEndpointSuffix: "core.chinacloudapi.cn",
|
||||||
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
|
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
|
||||||
TrafficManagerDNSSuffix: "trafficmanager.cn",
|
TrafficManagerDNSSuffix: "trafficmanager.cn",
|
||||||
KeyVaultDNSSuffix: "vault.azure.cn",
|
KeyVaultDNSSuffix: "vault.azure.cn",
|
||||||
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net",
|
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn",
|
||||||
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
|
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
|
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||||
|
TokenAudience: "https://management.chinacloudapi.cn/",
|
||||||
}
|
}
|
||||||
|
|
||||||
// GermanCloud is the cloud environment operated in Germany
|
// GermanCloud is the cloud environment operated in Germany
|
||||||
|
@ -113,6 +141,8 @@ var (
|
||||||
GalleryEndpoint: "https://gallery.cloudapi.de/",
|
GalleryEndpoint: "https://gallery.cloudapi.de/",
|
||||||
KeyVaultEndpoint: "https://vault.microsoftazure.de/",
|
KeyVaultEndpoint: "https://vault.microsoftazure.de/",
|
||||||
GraphEndpoint: "https://graph.cloudapi.de/",
|
GraphEndpoint: "https://graph.cloudapi.de/",
|
||||||
|
ServiceBusEndpoint: "https://servicebus.cloudapi.de/",
|
||||||
|
BatchManagementEndpoint: "https://batch.cloudapi.de/",
|
||||||
StorageEndpointSuffix: "core.cloudapi.de",
|
StorageEndpointSuffix: "core.cloudapi.de",
|
||||||
SQLDatabaseDNSSuffix: "database.cloudapi.de",
|
SQLDatabaseDNSSuffix: "database.cloudapi.de",
|
||||||
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
|
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
|
||||||
|
@ -121,47 +151,41 @@ var (
|
||||||
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
|
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
|
||||||
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
|
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
|
||||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||||
|
TokenAudience: "https://management.microsoftazure.de/",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// EnvironmentFromName returns an Environment based on the common name specified
|
// EnvironmentFromName returns an Environment based on the common name specified.
|
||||||
func EnvironmentFromName(name string) (Environment, error) {
|
func EnvironmentFromName(name string) (Environment, error) {
|
||||||
|
// IMPORTANT
|
||||||
|
// As per @radhikagupta5:
|
||||||
|
// This is technical debt, fundamentally here because Kubernetes is not currently accepting
|
||||||
|
// contributions to the providers. Once that is an option, the provider should be updated to
|
||||||
|
// directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation
|
||||||
|
// from this method based on the name that is provided to us.
|
||||||
|
if strings.EqualFold(name, "AZURESTACKCLOUD") {
|
||||||
|
return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName))
|
||||||
|
}
|
||||||
|
|
||||||
name = strings.ToUpper(name)
|
name = strings.ToUpper(name)
|
||||||
env, ok := environments[name]
|
env, ok := environments[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
|
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
return env, nil
|
return env, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls
|
// EnvironmentFromFile loads an Environment from a configuration file available on disk.
|
||||||
func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) {
|
// This function is particularly useful in the Hybrid Cloud model, where one must define their own
|
||||||
return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID)
|
// endpoints.
|
||||||
}
|
func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
|
||||||
|
fileContents, err := ioutil.ReadFile(location)
|
||||||
// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint
|
|
||||||
func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
|
|
||||||
template := "%s/oauth2/%s?api-version=%s"
|
|
||||||
u, err := url.Parse(activeDirectoryEndpoint)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return
|
||||||
}
|
|
||||||
authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &OAuthConfig{
|
err = json.Unmarshal(fileContents, &unmarshaled)
|
||||||
AuthorizeEndpoint: *authorizeURL,
|
|
||||||
TokenEndpoint: *tokenURL,
|
return
|
||||||
DeviceCodeEndpoint: *deviceCodeURL,
|
|
||||||
}, nil
|
|
||||||
}
|
}
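How a consumer is expected to resolve environments after this change: built-in clouds still resolve by name, while AZURESTACKCLOUD is routed through EnvironmentFromFile via the AZURE_ENVIRONMENT_FILEPATH variable. The cloud name and file path below are illustrative.

package example

import (
	"fmt"
	"os"

	"github.com/Azure/go-autorest/autorest/azure"
)

func pickEnvironment() {
	// Built-in clouds resolve by common name.
	pub, err := azure.EnvironmentFromName("AZUREPUBLICCLOUD")
	fmt.Println(pub.ResourceManagerEndpoint, err)

	// Azure Stack: point the documented env var at a JSON file describing the cloud.
	os.Setenv(azure.EnvironmentFilepathName, "/path/to/azurestack.json") // placeholder path
	stack, err := azure.EnvironmentFromName("AZURESTACKCLOUD")
	fmt.Println(stack.Name, err)
}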
|
||||||
|
|
245
vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
generated
vendored
Normal file
|
@ -0,0 +1,245 @@
|
||||||
|
package azure
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Copyright 2017 Microsoft Corporation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
type audience []string
|
||||||
|
|
||||||
|
type authentication struct {
|
||||||
|
LoginEndpoint string `json:"loginEndpoint"`
|
||||||
|
Audiences audience `json:"audiences"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type environmentMetadataInfo struct {
|
||||||
|
GalleryEndpoint string `json:"galleryEndpoint"`
|
||||||
|
GraphEndpoint string `json:"graphEndpoint"`
|
||||||
|
PortalEndpoint string `json:"portalEndpoint"`
|
||||||
|
Authentication authentication `json:"authentication"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnvironmentProperty represent property names that clients can override
|
||||||
|
type EnvironmentProperty string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// EnvironmentName ...
|
||||||
|
EnvironmentName EnvironmentProperty = "name"
|
||||||
|
// EnvironmentManagementPortalURL ..
|
||||||
|
EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
|
||||||
|
// EnvironmentPublishSettingsURL ...
|
||||||
|
EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
|
||||||
|
// EnvironmentServiceManagementEndpoint ...
|
||||||
|
EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
|
||||||
|
// EnvironmentResourceManagerEndpoint ...
|
||||||
|
EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
|
||||||
|
// EnvironmentActiveDirectoryEndpoint ...
|
||||||
|
EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
|
||||||
|
// EnvironmentGalleryEndpoint ...
|
||||||
|
EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
|
||||||
|
// EnvironmentKeyVaultEndpoint ...
|
||||||
|
EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
|
||||||
|
// EnvironmentGraphEndpoint ...
|
||||||
|
EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
|
||||||
|
// EnvironmentServiceBusEndpoint ...
|
||||||
|
EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
|
||||||
|
// EnvironmentBatchManagementEndpoint ...
|
||||||
|
EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
|
||||||
|
// EnvironmentStorageEndpointSuffix ...
|
||||||
|
EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
|
||||||
|
// EnvironmentSQLDatabaseDNSSuffix ...
|
||||||
|
EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
|
||||||
|
// EnvironmentTrafficManagerDNSSuffix ...
|
||||||
|
EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix"
|
||||||
|
// EnvironmentKeyVaultDNSSuffix ...
|
||||||
|
EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix"
|
||||||
|
// EnvironmentServiceBusEndpointSuffix ...
|
||||||
|
EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix"
|
||||||
|
// EnvironmentServiceManagementVMDNSSuffix ...
|
||||||
|
EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix"
|
||||||
|
// EnvironmentResourceManagerVMDNSSuffix ...
|
||||||
|
EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix"
|
||||||
|
// EnvironmentContainerRegistryDNSSuffix ...
|
||||||
|
EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix"
|
||||||
|
// EnvironmentTokenAudience ...
|
||||||
|
EnvironmentTokenAudience EnvironmentProperty = "tokenAudience"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OverrideProperty represents property name and value that clients can override
|
||||||
|
type OverrideProperty struct {
|
||||||
|
Key EnvironmentProperty
|
||||||
|
Value string
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnvironmentFromURL loads an Environment from a URL
|
||||||
|
// This function is particularly useful in the Hybrid Cloud model, where one may define their own
|
||||||
|
// endpoints.
|
||||||
|
func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) {
|
||||||
|
var metadataEnvProperties environmentMetadataInfo
|
||||||
|
|
||||||
|
if resourceManagerEndpoint == "" {
|
||||||
|
return environment, fmt.Errorf("Metadata resource manager endpoint is empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil {
|
||||||
|
return environment, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Give priority to user's override values
|
||||||
|
overrideProperties(&environment, properties)
|
||||||
|
|
||||||
|
if environment.Name == "" {
|
||||||
|
environment.Name = "HybridEnvironment"
|
||||||
|
}
|
||||||
|
stampDNSSuffix := environment.StorageEndpointSuffix
|
||||||
|
if stampDNSSuffix == "" {
|
||||||
|
stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/")
|
||||||
|
environment.StorageEndpointSuffix = stampDNSSuffix
|
||||||
|
}
|
||||||
|
if environment.KeyVaultDNSSuffix == "" {
|
||||||
|
environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix)
|
||||||
|
}
|
||||||
|
if environment.KeyVaultEndpoint == "" {
|
||||||
|
environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix)
|
||||||
|
}
|
||||||
|
if environment.TokenAudience == "" {
|
||||||
|
environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0]
|
||||||
|
}
|
||||||
|
if environment.ActiveDirectoryEndpoint == "" {
|
||||||
|
environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint
|
||||||
|
}
|
||||||
|
if environment.ResourceManagerEndpoint == "" {
|
||||||
|
environment.ResourceManagerEndpoint = resourceManagerEndpoint
|
||||||
|
}
|
||||||
|
if environment.GalleryEndpoint == "" {
|
||||||
|
environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint
|
||||||
|
}
|
||||||
|
if environment.GraphEndpoint == "" {
|
||||||
|
environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint
|
||||||
|
}
|
||||||
|
|
||||||
|
return environment, nil
|
||||||
|
}
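A sketch of calling the new metadata-driven constructor, using a placeholder Azure Stack resource manager endpoint and a single override; hybridEnvironment is a hypothetical caller.

package example

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func hybridEnvironment() {
	env, err := azure.EnvironmentFromURL(
		"https://management.local.azurestack.external/", // placeholder ARM endpoint
		azure.OverrideProperty{Key: azure.EnvironmentName, Value: "MyAzureStack"},
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(env.Name, env.ActiveDirectoryEndpoint, env.TokenAudience)
}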
|
||||||
|
|
||||||
|
func overrideProperties(environment *Environment, properties []OverrideProperty) {
|
||||||
|
for _, property := range properties {
|
||||||
|
switch property.Key {
|
||||||
|
case EnvironmentName:
|
||||||
|
{
|
||||||
|
environment.Name = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentManagementPortalURL:
|
||||||
|
{
|
||||||
|
environment.ManagementPortalURL = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentPublishSettingsURL:
|
||||||
|
{
|
||||||
|
environment.PublishSettingsURL = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentServiceManagementEndpoint:
|
||||||
|
{
|
||||||
|
environment.ServiceManagementEndpoint = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentResourceManagerEndpoint:
|
||||||
|
{
|
||||||
|
environment.ResourceManagerEndpoint = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentActiveDirectoryEndpoint:
|
||||||
|
{
|
||||||
|
environment.ActiveDirectoryEndpoint = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentGalleryEndpoint:
|
||||||
|
{
|
||||||
|
environment.GalleryEndpoint = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentKeyVaultEndpoint:
|
||||||
|
{
|
||||||
|
environment.KeyVaultEndpoint = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentGraphEndpoint:
|
||||||
|
{
|
||||||
|
environment.GraphEndpoint = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentServiceBusEndpoint:
|
||||||
|
{
|
||||||
|
environment.ServiceBusEndpoint = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentBatchManagementEndpoint:
|
||||||
|
{
|
||||||
|
environment.BatchManagementEndpoint = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentStorageEndpointSuffix:
|
||||||
|
{
|
||||||
|
environment.StorageEndpointSuffix = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentSQLDatabaseDNSSuffix:
|
||||||
|
{
|
||||||
|
environment.SQLDatabaseDNSSuffix = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentTrafficManagerDNSSuffix:
|
||||||
|
{
|
||||||
|
environment.TrafficManagerDNSSuffix = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentKeyVaultDNSSuffix:
|
||||||
|
{
|
||||||
|
environment.KeyVaultDNSSuffix = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentServiceBusEndpointSuffix:
|
||||||
|
{
|
||||||
|
environment.ServiceBusEndpointSuffix = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentServiceManagementVMDNSSuffix:
|
||||||
|
{
|
||||||
|
environment.ServiceManagementVMDNSSuffix = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentResourceManagerVMDNSSuffix:
|
||||||
|
{
|
||||||
|
environment.ResourceManagerVMDNSSuffix = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentContainerRegistryDNSSuffix:
|
||||||
|
{
|
||||||
|
environment.ContainerRegistryDNSSuffix = property.Value
|
||||||
|
}
|
||||||
|
case EnvironmentTokenAudience:
|
||||||
|
{
|
||||||
|
environment.TokenAudience = property.Value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) {
|
||||||
|
client := autorest.NewClientWithUserAgent("")
|
||||||
|
managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0")
|
||||||
|
req, _ := http.NewRequest("GET", managementEndpoint, nil)
|
||||||
|
response, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return environment, err
|
||||||
|
}
|
||||||
|
defer response.Body.Close()
|
||||||
|
jsonResponse, err := ioutil.ReadAll(response.Body)
|
||||||
|
if err != nil {
|
||||||
|
return environment, err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(jsonResponse, &environment)
|
||||||
|
return environment, err
|
||||||
|
}
|
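As a usage sketch of the EnvironmentFromURL entry point shown above: the metadata endpoint URL and the override value below are illustrative placeholders, not values taken from this change. Overrides are applied before the metadata-derived defaults, which is why the explicit name wins here.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Resolve a Hybrid/Azure Stack environment from its ARM metadata endpoint.
	// The endpoint below is a hypothetical example.
	env, err := azure.EnvironmentFromURL(
		"https://management.local.azurestack.external/",
		// Overrides take priority over values returned by the metadata endpoint.
		azure.OverrideProperty{Key: azure.EnvironmentName, Value: "MyHybridCloud"},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(env.Name, env.ResourceManagerEndpoint, env.StorageEndpointSuffix)
}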
200 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go generated vendored Normal file
@@ -0,0 +1,200 @@
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package azure

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

// DoRetryWithRegistration tries to register the resource provider in case it is unregistered.
// It also handles request retries
func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
			rr := autorest.NewRetriableRequest(r)
			for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
				err = rr.Prepare()
				if err != nil {
					return resp, err
				}

				resp, err = autorest.SendWithSender(s, rr.Request(),
					autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
				)
				if err != nil {
					return resp, err
				}

				if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
					return resp, err
				}
				var re RequestError
				err = autorest.Respond(
					resp,
					autorest.ByUnmarshallingJSON(&re),
				)
				if err != nil {
					return resp, err
				}
				err = re

				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
					regErr := register(client, r, re)
					if regErr != nil {
						return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
					}
				}
			}
			return resp, fmt.Errorf("failed request: %s", err)
		})
	}
}

func getProvider(re RequestError) (string, error) {
	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
		return re.ServiceError.Details[0]["target"].(string), nil
	}
	return "", errors.New("provider was not found in the response")
}

func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
	subID := getSubscription(originalReq.URL.Path)
	if subID == "" {
		return errors.New("missing parameter subscriptionID to register resource provider")
	}
	providerName, err := getProvider(re)
	if err != nil {
		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
	}
	newURL := url.URL{
		Scheme: originalReq.URL.Scheme,
		Host:   originalReq.URL.Host,
	}

	// taken from the resources SDK
	// with almost identical code, this sections are easier to mantain
	// It is also not a good idea to import the SDK here
	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
	pathParameters := map[string]interface{}{
		"resourceProviderNamespace": autorest.Encode("path", providerName),
		"subscriptionId":            autorest.Encode("path", subID),
	}

	const APIVersion = "2016-09-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsPost(),
		autorest.WithBaseURL(newURL.String()),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	)

	req, err := preparer.Prepare(&http.Request{})
	if err != nil {
		return err
	}
	req = req.WithContext(originalReq.Context())

	resp, err := autorest.SendWithSender(client, req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
	)
	if err != nil {
		return err
	}

	type Provider struct {
		RegistrationState *string `json:"registrationState,omitempty"`
	}
	var provider Provider

	err = autorest.Respond(
		resp,
		WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&provider),
		autorest.ByClosing(),
	)
	if err != nil {
		return err
	}

	// poll for registered provisioning state
	now := time.Now()
	for err == nil && time.Since(now) < client.PollingDuration {
		// taken from the resources SDK
		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
		preparer := autorest.CreatePreparer(
			autorest.AsGet(),
			autorest.WithBaseURL(newURL.String()),
			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
			autorest.WithQueryParameters(queryParameters),
		)
		req, err = preparer.Prepare(&http.Request{})
		if err != nil {
			return err
		}
		req = req.WithContext(originalReq.Context())

		resp, err := autorest.SendWithSender(client, req,
			autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
		)
		if err != nil {
			return err
		}

		err = autorest.Respond(
			resp,
			WithErrorUnlessStatusCode(http.StatusOK),
			autorest.ByUnmarshallingJSON(&provider),
			autorest.ByClosing(),
		)
		if err != nil {
			return err
		}

		if provider.RegistrationState != nil &&
			*provider.RegistrationState == "Registered" {
			break
		}

		delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done())
		if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) {
			return originalReq.Context().Err()
		}
	}
	if !(time.Since(now) < client.PollingDuration) {
		return errors.New("polling for resource provider registration has exceeded the polling duration")
	}
	return err
}

func getSubscription(path string) string {
	parts := strings.Split(path, "/")
	for i, v := range parts {
		if v == "subscriptions" && (i+1) < len(parts) {
			return parts[i+1]
		}
	}
	return ""
}
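A sketch of how this decorator is typically attached when sending an ARM request: the subscription ID and URL are placeholders and authorization is omitted, so a real call would fail authentication, but the wiring matches how the SDK passes DoRetryWithRegistration to SendWithSender.

package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	client := autorest.NewClientWithUserAgent("registry-example")

	// Hypothetical ARM request; real SDK calls build this with autorest preparers.
	req, err := http.NewRequest("GET",
		"https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups?api-version=2016-09-01", nil)
	if err != nil {
		log.Fatal(err)
	}

	// DoRetryWithRegistration retries the request and, on a 409 with code
	// "MissingSubscriptionRegistration", registers the provider before retrying.
	resp, err := autorest.SendWithSender(client, req, azure.DoRetryWithRegistration(client))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}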
363 vendor/github.com/Azure/go-autorest/autorest/azure/token.go generated vendored
@@ -1,363 +0,0 @@
package azure

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha1"
	"crypto/x509"
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/dgrijalva/jwt-go"
)

const (
	defaultRefresh = 5 * time.Minute
	tokenBaseDate  = "1970-01-01T00:00:00Z"

	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
	OAuthGrantTypeDeviceCode = "device_code"

	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
	OAuthGrantTypeClientCredentials = "client_credentials"

	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
	OAuthGrantTypeRefreshToken = "refresh_token"
)

var expirationBase time.Time

func init() {
	expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate)
}

// TokenRefreshCallback is the type representing callbacks that will be called after
// a successful token refresh
type TokenRefreshCallback func(Token) error

// Token encapsulates the access token used to authorize Azure requests.
type Token struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`

	ExpiresIn string `json:"expires_in"`
	ExpiresOn string `json:"expires_on"`
	NotBefore string `json:"not_before"`

	Resource string `json:"resource"`
	Type     string `json:"token_type"`
}

// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
	s, err := strconv.Atoi(t.ExpiresOn)
	if err != nil {
		s = -3600
	}
	return expirationBase.Add(time.Duration(s) * time.Second).UTC()
}

// IsExpired returns true if the Token is expired, false otherwise.
func (t Token) IsExpired() bool {
	return t.WillExpireIn(0)
}

// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
// from now, false otherwise.
func (t Token) WillExpireIn(d time.Duration) bool {
	return !t.Expires().After(time.Now().Add(d))
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the AccessToken of the Token.
func (t *Token) WithAuthorization() autorest.PrepareDecorator {
	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r)
		})
	}
}

// ServicePrincipalNoSecret represents a secret type that contains no secret
// meaning it is not valid for fetching a fresh token. This is used by Manual
type ServicePrincipalNoSecret struct {
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret
// It only returns an error for the ServicePrincipalNoSecret type
func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
}

// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form
// that is submitted when acquiring an oAuth token.
type ServicePrincipalSecret interface {
	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
}

// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
type ServicePrincipalTokenSecret struct {
	ClientSecret string
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	v.Set("client_secret", tokenSecret.ClientSecret)
	return nil
}

// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
type ServicePrincipalCertificateSecret struct {
	Certificate *x509.Certificate
	PrivateKey  *rsa.PrivateKey
}

// SignJwt returns the JWT signed with the certificate's private key.
func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
	hasher := sha1.New()
	_, err := hasher.Write(secret.Certificate.Raw)
	if err != nil {
		return "", err
	}

	thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))

	// The jti (JWT ID) claim provides a unique identifier for the JWT.
	jti := make([]byte, 20)
	_, err = rand.Read(jti)
	if err != nil {
		return "", err
	}

	token := jwt.New(jwt.SigningMethodRS256)
	token.Header["x5t"] = thumbprint
	token.Claims = jwt.MapClaims{
		"aud": spt.oauthConfig.TokenEndpoint.String(),
		"iss": spt.clientID,
		"sub": spt.clientID,
		"jti": base64.URLEncoding.EncodeToString(jti),
		"nbf": time.Now().Unix(),
		"exp": time.Now().Add(time.Hour * 24).Unix(),
	}

	signedString, err := token.SignedString(secret.PrivateKey)
	return signedString, err
}

// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
	jwt, err := secret.SignJwt(spt)
	if err != nil {
		return err
	}

	v.Set("client_assertion", jwt)
	v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
	return nil
}

// ServicePrincipalToken encapsulates a Token created for a Service Principal.
type ServicePrincipalToken struct {
	Token

	secret        ServicePrincipalSecret
	oauthConfig   OAuthConfig
	clientID      string
	resource      string
	autoRefresh   bool
	refreshWithin time.Duration
	sender        autorest.Sender

	refreshCallbacks []TokenRefreshCallback
}

// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	spt := &ServicePrincipalToken{
		oauthConfig:      oauthConfig,
		secret:           secret,
		clientID:         id,
		resource:         resource,
		autoRefresh:      true,
		refreshWithin:    defaultRefresh,
		sender:           &http.Client{},
		refreshCallbacks: callbacks,
	}
	return spt, nil
}

// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	spt, err := NewServicePrincipalTokenWithSecret(
		oauthConfig,
		clientID,
		resource,
		&ServicePrincipalNoSecret{},
		callbacks...)
	if err != nil {
		return nil, err
	}

	spt.Token = token

	return spt, nil
}

// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
// credentials scoped to the named resource.
func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	return NewServicePrincipalTokenWithSecret(
		oauthConfig,
		clientID,
		resource,
		&ServicePrincipalTokenSecret{
			ClientSecret: secret,
		},
		callbacks...,
	)
}

// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes.
func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
	return NewServicePrincipalTokenWithSecret(
		oauthConfig,
		clientID,
		resource,
		&ServicePrincipalCertificateSecret{
			PrivateKey:  privateKey,
			Certificate: certificate,
		},
		callbacks...,
	)
}

// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin).
func (spt *ServicePrincipalToken) EnsureFresh() error {
	if spt.WillExpireIn(spt.refreshWithin) {
		return spt.Refresh()
	}
	return nil
}

// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
	if spt.refreshCallbacks != nil {
		for _, callback := range spt.refreshCallbacks {
			err := callback(spt.Token)
			if err != nil {
				return autorest.NewErrorWithError(err,
					"azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error")
			}
		}
	}
	return nil
}

// Refresh obtains a fresh token for the Service Principal.
func (spt *ServicePrincipalToken) Refresh() error {
	return spt.refreshInternal(spt.resource)
}

// RefreshExchange refreshes the token, but for a different resource.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
	return spt.refreshInternal(resource)
}

func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
	v := url.Values{}
	v.Set("client_id", spt.clientID)
	v.Set("resource", resource)

	if spt.RefreshToken != "" {
		v.Set("grant_type", OAuthGrantTypeRefreshToken)
		v.Set("refresh_token", spt.RefreshToken)
	} else {
		v.Set("grant_type", OAuthGrantTypeClientCredentials)
		err := spt.secret.SetAuthenticationValues(spt, &v)
		if err != nil {
			return err
		}
	}

	req, _ := autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.AsFormURLEncoded(),
		autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()),
		autorest.WithFormData(v))

	resp, err := autorest.SendWithSender(spt.sender, req)
	if err != nil {
		return autorest.NewErrorWithError(err,
			"azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s",
			spt.clientID)
	}

	var newToken Token
	err = autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&newToken),
		autorest.ByClosing())
	if err != nil {
		return autorest.NewErrorWithError(err,
			"azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request",
			spt.clientID)
	}

	spt.Token = newToken

	err = spt.InvokeRefreshCallbacks(newToken)
	if err != nil {
		// its already wrapped inside InvokeRefreshCallbacks
		return err
	}

	return nil
}

// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
	spt.autoRefresh = autoRefresh
}

// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will
// refresh the token.
func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
	spt.refreshWithin = d
	return
}

// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An
// undecorated http.Client is used by default.
func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) {
	spt.sender = s
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken.
//
// By default, the token will automatically refresh if nearly expired (as determined by the
// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing
// tokens.
func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator {
	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			if spt.autoRefresh {
				err := spt.EnsureFresh()
				if err != nil {
					return r, autorest.NewErrorWithError(err,
						"azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s",
						r.URL)
				}
			}
			return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r)
		})
	}
}
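For reference, a sketch of how the removed ServicePrincipalToken type was wired into request preparation before this change; in the newer go-autorest vendored here the equivalent types live in the adal package. The function name and the resourceURL parameter are hypothetical illustrations only.

package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// newARMRequest shows the old pattern: the token's WithAuthorization decorator
// refreshes the token when it is close to expiry and then adds the
// "Authorization: Bearer <access token>" header to the prepared request.
func newARMRequest(spt *azure.ServicePrincipalToken, resourceURL string) (*http.Request, error) {
	return autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL(resourceURL),
		spt.WithAuthorization(),
	)
}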
47 vendor/github.com/Azure/go-autorest/autorest/client.go generated vendored
@@ -1,5 +1,19 @@
 package autorest
+
+// Copyright 2017 Microsoft Corporation
+// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)
+
 import (
 	"bytes"
 	"fmt"
@@ -21,6 +35,9 @@ const (
 	// DefaultRetryAttempts is number of attempts for retry status codes (5xx).
 	DefaultRetryAttempts = 3
+
+	// DefaultRetryDuration is the duration to wait between retries.
+	DefaultRetryDuration = 30 * time.Second
 )
 
 var (
@@ -33,8 +50,10 @@ var (
 		Version(),
 	)
 
-	statusCodesForRetry = []int{
+	// StatusCodesForRetry are a defined group of status code for which the client will retry
+	StatusCodesForRetry = []int{
 		http.StatusRequestTimeout,      // 408
+		http.StatusTooManyRequests,     // 429
 		http.StatusInternalServerError, // 500
 		http.StatusBadGateway,          // 502
 		http.StatusServiceUnavailable,  // 503
@@ -147,6 +166,9 @@ type Client struct {
 	UserAgent string
 
 	Jar http.CookieJar
+
+	// Set to true to skip attempted registration of resource providers (false by default).
+	SkipResourceProviderRegistration bool
 }
 
 // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
@@ -156,9 +178,10 @@ func NewClientWithUserAgent(ua string) Client {
 		PollingDelay:    DefaultPollingDelay,
 		PollingDuration: DefaultPollingDuration,
 		RetryAttempts:   DefaultRetryAttempts,
-		RetryDuration:   30 * time.Second,
+		RetryDuration:   DefaultRetryDuration,
 		UserAgent:       defaultUserAgent,
 	}
+	c.Sender = c.sender()
 	c.AddToUserAgent(ua)
 	return c
 }
@@ -180,16 +203,22 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
 		r, _ = Prepare(r,
 			WithUserAgent(c.UserAgent))
 	}
+	// NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
 	r, err := Prepare(r,
-		c.WithInspection(),
-		c.WithAuthorization())
+		c.WithAuthorization(),
+		c.WithInspection())
 	if err != nil {
-		return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+		var resp *http.Response
+		if detErr, ok := err.(DetailedError); ok {
+			// if the authorization failed (e.g. invalid credentials) there will
+			// be a response associated with the error, be sure to return it.
+			resp = detErr.Response
+		}
+		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
 	}
-	resp, err := SendWithSender(c.sender(), r,
-		DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...))
-	Respond(resp,
-		c.ByInspecting())
+	resp, err := SendWithSender(c.sender(), r)
+	Respond(resp, c.ByInspecting())
 	return resp, err
 }
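A sketch of the client wiring after this change, assuming retries are now applied explicitly as send decorators rather than inside Client.Do; the URL is a placeholder.

package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// NewClientWithUserAgent now seeds RetryDuration from DefaultRetryDuration.
	client := autorest.NewClientWithUserAgent("registry-example")
	client.SkipResourceProviderRegistration = true // opt out of auto-registration on 409s

	req, err := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}

	// Retries for transient status codes are attached as a send decorator.
	resp, err := autorest.SendWithSender(client, req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}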
14 vendor/github.com/Azure/go-autorest/autorest/date/date.go generated vendored
@@ -5,6 +5,20 @@ time.Time types. And both convert to time.Time through a ToTime method.
 */
 package date
+
+// Copyright 2017 Microsoft Corporation
+// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)
+
 import (
 	"fmt"
 	"time"
14 vendor/github.com/Azure/go-autorest/autorest/date/time.go generated vendored
@@ -1,5 +1,19 @@
 package date
+
+// Copyright 2017 Microsoft Corporation
+// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)
+
 import (
 	"regexp"
 	"time"
14 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go generated vendored
@@ -1,5 +1,19 @@
 package date
+
+// Copyright 2017 Microsoft Corporation
+// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)
+
 import (
 	"errors"
 	"time"
123 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go generated vendored Normal file
@@ -0,0 +1,123 @@
package date

// Copyright 2017 Microsoft Corporation
// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"time"
)

// unixEpoch is the moment in time that should be treated as timestamp 0.
var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)

// UnixTime marshals and unmarshals a time that is represented as the number
// of seconds (ignoring skip-seconds) since the Unix Epoch.
type UnixTime time.Time

// Duration returns the time as a Duration since the UnixEpoch.
func (t UnixTime) Duration() time.Duration {
	return time.Time(t).Sub(unixEpoch)
}

// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
func NewUnixTimeFromSeconds(seconds float64) UnixTime {
	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
}

// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
}

// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
	return UnixTime(unixEpoch.Add(dur))
}

// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0'
func UnixEpoch() time.Time {
	return unixEpoch
}

// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
func (t UnixTime) MarshalJSON() ([]byte, error) {
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
	if err != nil {
		return nil, err
	}
	return buffer.Bytes(), nil
}

// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since
// midnight January 1st, 1970.
func (t *UnixTime) UnmarshalJSON(text []byte) error {
	dec := json.NewDecoder(bytes.NewReader(text))

	var secondsSinceEpoch float64
	if err := dec.Decode(&secondsSinceEpoch); err != nil {
		return err
	}

	*t = NewUnixTimeFromSeconds(secondsSinceEpoch)

	return nil
}

// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number.
func (t UnixTime) MarshalText() ([]byte, error) {
	cast := time.Time(t)
	return cast.MarshalText()
}

// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch.
func (t *UnixTime) UnmarshalText(raw []byte) error {
	var unmarshaled time.Time

	if err := unmarshaled.UnmarshalText(raw); err != nil {
		return err
	}

	*t = UnixTime(unmarshaled)
	return nil
}

// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch.
func (t UnixTime) MarshalBinary() ([]byte, error) {
	buf := &bytes.Buffer{}

	payload := int64(t.Duration())

	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime.
func (t *UnixTime) UnmarshalBinary(raw []byte) error {
	var nanosecondsSinceEpoch int64

	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
		return err
	}
	*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
	return nil
}
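A small round-trip sketch for the new UnixTime type; the field name "nbf" and the timestamp value are just examples.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// A payload field that a service represents as seconds since the Unix epoch.
	type payload struct {
		NotBefore date.UnixTime `json:"nbf"`
	}

	in := payload{NotBefore: date.NewUnixTimeFromSeconds(1514764800)} // 2018-01-01T00:00:00Z
	raw, err := json.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(raw)) // e.g. {"nbf":1514764800}

	var out payload
	if err := json.Unmarshal(raw, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(time.Time(out.NotBefore).UTC())
}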
14 vendor/github.com/Azure/go-autorest/autorest/date/utility.go generated vendored
@@ -1,5 +1,19 @@
 package date
+
+// Copyright 2017 Microsoft Corporation
+// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)
+
 import (
 	"strings"
 	"time"
18 vendor/github.com/Azure/go-autorest/autorest/error.go generated vendored
@@ -1,5 +1,19 @@
 package autorest
+
+// Copyright 2017 Microsoft Corporation
+// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)
+
 import (
 	"fmt"
 	"net/http"
@@ -31,6 +45,9 @@ type DetailedError struct {
 
 	// Service Error is the response body of failed API in bytes
 	ServiceError []byte
+
+	// Response is the response object that was returned during failure if applicable.
+	Response *http.Response
 }
 
 // NewError creates a new Error conforming object from the passed packageType, method, and
@@ -67,6 +84,7 @@ func NewErrorWithError(original error, packageType string, method string, resp *
 		Method:     method,
 		StatusCode: statusCode,
 		Message:    fmt.Sprintf(message, args...),
+		Response:   resp,
 	}
 }
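A sketch of how callers can use the Response field now attached to DetailedError; the helper name and the placeholder request are illustrative only.

package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// doWithDiagnostics sends req through client and, on failure, logs the HTTP
// response that DetailedError now carries when one is available.
func doWithDiagnostics(client autorest.Client, req *http.Request) (*http.Response, error) {
	resp, err := client.Do(req)
	if err != nil {
		if detErr, ok := err.(autorest.DetailedError); ok && detErr.Response != nil {
			log.Printf("request failed with HTTP %d: %v", detErr.Response.StatusCode, detErr)
		}
		return resp, err
	}
	return resp, nil
}

func main() {
	client := autorest.NewClientWithUserAgent("example")
	req, err := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	if _, err := doWithDiagnostics(client, req); err != nil {
		log.Println(err)
	}
}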
75 vendor/github.com/Azure/go-autorest/autorest/preparer.go generated vendored
@@ -1,5 +1,19 @@
 package autorest
+
+// Copyright 2017 Microsoft Corporation
+// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)
+
 import (
 	"bytes"
 	"encoding/json"
@@ -13,8 +27,9 @@ import (
 )
 
 const (
 	mimeTypeJSON        = "application/json"
-	mimeTypeFormPost = "application/x-www-form-urlencoded"
+	mimeTypeOctetStream = "application/octet-stream"
+	mimeTypeFormPost    = "application/x-www-form-urlencoded"
 
 	headerAuthorization = "Authorization"
 	headerContentType   = "Content-Type"
@@ -98,6 +113,28 @@ func WithHeader(header string, value string) PrepareDecorator {
 	}
 }
 
+// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
+// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before
+// adding them.
+func WithHeaders(headers map[string]interface{}) PrepareDecorator {
+	h := ensureValueStrings(headers)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+
+				for name, value := range h {
+					r.Header.Set(http.CanonicalHeaderKey(name), value)
+				}
+			}
+			return r, err
+		})
+	}
+}
+
 // WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
 // value is "Bearer " followed by the supplied token.
 func WithBearerAuthorization(token string) PrepareDecorator {
@@ -128,6 +165,11 @@ func AsJSON() PrepareDecorator {
 	return AsContentType(mimeTypeJSON)
 }
 
+// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
+func AsOctetStream() PrepareDecorator {
+	return AsContentType(mimeTypeOctetStream)
+}
+
 // WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
 // decorator does not validate that the passed method string is a known HTTP method.
 func WithMethod(method string) PrepareDecorator {
@@ -201,6 +243,11 @@ func WithFormData(v url.Values) PrepareDecorator {
 			r, err := p.Prepare(r)
 			if err == nil {
 				s := v.Encode()
+
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+				r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
 				r.ContentLength = int64(len(s))
 				r.Body = ioutil.NopCloser(strings.NewReader(s))
 			}
@@ -416,28 +463,18 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato
 			if r.URL == nil {
 				return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
 			}
+
 			v := r.URL.Query()
 			for key, value := range parameters {
-				v.Add(key, value)
+				d, err := url.QueryUnescape(value)
+				if err != nil {
+					return r, err
+				}
+				v.Add(key, d)
 			}
-			r.URL.RawQuery = createQuery(v)
+			r.URL.RawQuery = v.Encode()
 		}
 		return r, err
 	})
 }
 
-// Authorizer is the interface that provides a PrepareDecorator used to supply request
-// authorization. Most often, the Authorizer decorator runs last so it has access to the full
-// state of the formed HTTP request.
-type Authorizer interface {
-	WithAuthorization() PrepareDecorator
-}
-
-// NullAuthorizer implements a default, "do nothing" Authorizer.
-type NullAuthorizer struct{}
-
-// WithAuthorization returns a PrepareDecorator that does nothing.
-func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
-	return WithNothing()
-}
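A sketch combining the decorators added here (AsOctetStream, WithHeaders, WithQueryParameters); the URL, header, and query values are illustrative only.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Prepare an upload-style request with the new decorators.
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsPut(),
		autorest.AsOctetStream(), // Content-Type: application/octet-stream
		autorest.WithBaseURL("https://example.blob.core.windows.net/container/blob"),
		autorest.WithHeaders(map[string]interface{}{
			"x-ms-blob-type": "BlockBlob",
		}),
		autorest.WithQueryParameters(map[string]interface{}{
			"timeout": "30",
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL.String())
	fmt.Println(req.Header.Get("Content-Type"), req.Header.Get("X-Ms-Blob-Type"))
}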
14 vendor/github.com/Azure/go-autorest/autorest/responder.go generated vendored
@@ -1,5 +1,19 @@
 package autorest
+
+// Copyright 2017 Microsoft Corporation
+// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)
+
 import (
 	"bytes"
 	"encoding/json"
52 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go generated vendored Normal file
@@ -0,0 +1,52 @@
package autorest

// Copyright 2017 Microsoft Corporation
// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)

import (
	"bytes"
	"io"
	"io/ioutil"
	"net/http"
)

// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic.
func NewRetriableRequest(req *http.Request) *RetriableRequest {
	return &RetriableRequest{req: req}
}

// Request returns the wrapped HTTP request.
func (rr *RetriableRequest) Request() *http.Request {
	return rr.req
}

func (rr *RetriableRequest) prepareFromByteReader() (err error) {
	// fall back to making a copy (only do this once)
	b := []byte{}
	if rr.req.ContentLength > 0 {
		b = make([]byte, rr.req.ContentLength)
		_, err = io.ReadFull(rr.req.Body, b)
		if err != nil {
			return err
		}
	} else {
		b, err = ioutil.ReadAll(rr.req.Body)
		if err != nil {
			return err
		}
	}
	rr.br = bytes.NewReader(b)
	rr.req.Body = ioutil.NopCloser(rr.br)
	return err
}
54 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go generated vendored Normal file
@@ -0,0 +1,54 @@
// +build !go1.8

// Copyright 2017 Microsoft Corporation
// (Apache License, Version 2.0 header, identical to the 13-line header in rp.go above)

package autorest

import (
	"bytes"
	"io/ioutil"
	"net/http"
)

// RetriableRequest provides facilities for retrying an HTTP request.
type RetriableRequest struct {
	req *http.Request
	br  *bytes.Reader
}

// Prepare signals that the request is about to be sent.
func (rr *RetriableRequest) Prepare() (err error) {
	// preserve the request body; this is to support retry logic as
	// the underlying transport will always close the reqeust body
	if rr.req.Body != nil {
		if rr.br != nil {
			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
			rr.req.Body = ioutil.NopCloser(rr.br)
		}
		if err != nil {
			return err
		}
		if rr.br == nil {
			// fall back to making a copy (only do this once)
			err = rr.prepareFromByteReader()
		}
	}
	return err
}

func removeRequestBody(req *http.Request) {
	req.Body = nil
	req.ContentLength = 0
}
66 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go generated vendored Normal file
@@ -0,0 +1,66 @@
// +build go1.8

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package autorest

import (
    "bytes"
    "io"
    "io/ioutil"
    "net/http"
)

// RetriableRequest provides facilities for retrying an HTTP request.
type RetriableRequest struct {
    req *http.Request
    rc  io.ReadCloser
    br  *bytes.Reader
}

// Prepare signals that the request is about to be sent.
func (rr *RetriableRequest) Prepare() (err error) {
    // preserve the request body; this is to support retry logic as
    // the underlying transport will always close the reqeust body
    if rr.req.Body != nil {
        if rr.rc != nil {
            rr.req.Body = rr.rc
        } else if rr.br != nil {
            _, err = rr.br.Seek(0, io.SeekStart)
            rr.req.Body = ioutil.NopCloser(rr.br)
        }
        if err != nil {
            return err
        }
        if rr.req.GetBody != nil {
            // this will allow us to preserve the body without having to
            // make a copy. note we need to do this on each iteration
            rr.rc, err = rr.req.GetBody()
            if err != nil {
                return err
            }
        } else if rr.br == nil {
            // fall back to making a copy (only do this once)
            err = rr.prepareFromByteReader()
        }
    }
    return err
}

func removeRequestBody(req *http.Request) {
    req.Body = nil
    req.GetBody = nil
    req.ContentLength = 0
}
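As an aside, the go1.8 variant above leans on `http.Request.GetBody`, which the standard library populates when the request body is a replayable reader. A minimal stdlib-only sketch of that replay idea follows; the URL and payload are invented for illustration and are not part of this changeset.

```go
package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    // http.NewRequest sets GetBody for bytes.Reader/strings.Reader bodies,
    // which is what lets a retrying sender rewind the payload on each attempt.
    req, err := http.NewRequest(http.MethodPut, "http://example.invalid/blob", bytes.NewReader([]byte("payload")))
    if err != nil {
        panic(err)
    }
    for attempt := 0; attempt < 3; attempt++ {
        body, _ := req.GetBody() // a fresh ReadCloser per attempt
        req.Body = body
        fmt.Println("attempt", attempt, "body ready:", req.Body != nil)
    }
}
```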
95 vendor/github.com/Azure/go-autorest/autorest/sender.go generated vendored
@@ -1,12 +1,25 @@
 package autorest
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
-    "bytes"
     "fmt"
-    "io/ioutil"
     "log"
     "math"
     "net/http"
+    "strconv"
     "time"
 )
 
@@ -73,7 +86,7 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht
 func AfterDelay(d time.Duration) SendDecorator {
     return func(s Sender) Sender {
         return SenderFunc(func(r *http.Request) (*http.Response, error) {
-            if !DelayForBackoff(d, 0, r.Cancel) {
+            if !DelayForBackoff(d, 0, r.Context().Done()) {
                 return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
             }
             return s.Do(r)
@@ -152,7 +165,7 @@ func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...
             resp, err = s.Do(r)
 
             if err == nil && ResponseHasStatusCode(resp, codes...) {
-                r, err = NewPollingRequest(resp, r.Cancel)
+                r, err = NewPollingRequestWithContext(r.Context(), resp)
 
                 for err == nil && ResponseHasStatusCode(resp, codes...) {
                     Respond(resp,
@@ -175,12 +188,19 @@ func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...
 func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
     return func(s Sender) Sender {
         return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+            rr := NewRetriableRequest(r)
             for attempt := 0; attempt < attempts; attempt++ {
-                resp, err = s.Do(r)
+                err = rr.Prepare()
+                if err != nil {
+                    return resp, err
+                }
+                resp, err = s.Do(rr.Request())
                 if err == nil {
                     return resp, err
                 }
-                DelayForBackoff(backoff, attempt, r.Cancel)
+                if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+                    return nil, r.Context().Err()
+                }
             }
             return resp, err
         })
@@ -194,29 +214,57 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
 func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
     return func(s Sender) Sender {
         return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
-            b := []byte{}
-            if r.Body != nil {
-                b, err = ioutil.ReadAll(r.Body)
-                if err != nil {
-                    return resp, err
-                }
-            }
-
+            rr := NewRetriableRequest(r)
             // Increment to add the first call (attempts denotes number of retries)
             attempts++
-            for attempt := 0; attempt < attempts; attempt++ {
-                r.Body = ioutil.NopCloser(bytes.NewBuffer(b))
-                resp, err = s.Do(r)
-                if err != nil || !ResponseHasStatusCode(resp, codes...) {
+            for attempt := 0; attempt < attempts; {
+                err = rr.Prepare()
+                if err != nil {
+                    return resp, err
+                }
+                resp, err = s.Do(rr.Request())
+                // if the error isn't temporary don't bother retrying
+                if err != nil && !IsTemporaryNetworkError(err) {
+                    return nil, err
+                }
+                // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+                // resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+                if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
                     return resp, err
                 }
-                DelayForBackoff(backoff, attempt, r.Cancel)
+                delayed := DelayWithRetryAfter(resp, r.Context().Done())
+                if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+                    return nil, r.Context().Err()
+                }
+                // don't count a 429 against the number of attempts
+                // so that we continue to retry until it succeeds
+                if resp == nil || resp.StatusCode != http.StatusTooManyRequests {
+                    attempt++
+                }
             }
             return resp, err
         })
     }
 }
 
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
+// responses with status code 429
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+    if resp == nil {
+        return false
+    }
+    retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
+    if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
+        select {
+        case <-time.After(time.Duration(retryAfter) * time.Second):
+            return true
+        case <-cancel:
+            return false
+        }
+    }
+    return false
+}
+
 // DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
 // to or greater than the specified duration, exponentially backing off between requests using the
 // supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
@@ -224,13 +272,20 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
 func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
     return func(s Sender) Sender {
         return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+            rr := NewRetriableRequest(r)
             end := time.Now().Add(d)
             for attempt := 0; time.Now().Before(end); attempt++ {
-                resp, err = s.Do(r)
+                err = rr.Prepare()
+                if err != nil {
+                    return resp, err
+                }
+                resp, err = s.Do(rr.Request())
                 if err == nil {
                     return resp, err
                 }
-                DelayForBackoff(backoff, attempt, r.Cancel)
+                if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+                    return nil, r.Context().Err()
+                }
             }
             return resp, err
         })
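To illustrate how these decorators are composed, here is a minimal sketch using only the functions visible in the hunks above; the endpoint URL and retry settings are placeholders chosen for the example, not values from this changeset.

```go
package main

import (
    "fmt"
    "net/http"
    "time"

    "github.com/Azure/go-autorest/autorest"
)

func main() {
    req, _ := http.NewRequest(http.MethodGet, "https://example.invalid/resource", nil)
    // *http.Client has a Do method, so it satisfies autorest.Sender and can be wrapped directly.
    resp, err := autorest.SendWithSender(
        http.DefaultClient,
        req,
        autorest.DoRetryForStatusCodes(3, 2*time.Second, http.StatusTooManyRequests, http.StatusServiceUnavailable),
    )
    if err != nil {
        fmt.Println("send failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}
```

Note that with the new code a 429 response waits out the Retry-After header (via DelayWithRetryAfter) and does not count against the attempt budget.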
107 vendor/github.com/Azure/go-autorest/autorest/utility.go generated vendored
@@ -1,15 +1,32 @@
 package autorest
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
     "bytes"
     "encoding/json"
     "encoding/xml"
     "fmt"
     "io"
+    "net"
+    "net/http"
     "net/url"
     "reflect"
-    "sort"
     "strings"
+
+    "github.com/Azure/go-autorest/autorest/adal"
 )
 
 // EncodedAs is a series of constants specifying various data encodings
@@ -123,13 +140,38 @@ func MapToValues(m map[string]interface{}) url.Values {
     return v
 }
 
-// String method converts interface v to string. If interface is a list, it
-// joins list elements using separator.
-func String(v interface{}, sep ...string) string {
-    if len(sep) > 0 {
-        return ensureValueString(strings.Join(v.([]string), sep[0]))
+// AsStringSlice method converts interface{} to []string. This expects a
+//that the parameter passed to be a slice or array of a type that has the underlying
+//type a string.
+func AsStringSlice(s interface{}) ([]string, error) {
+    v := reflect.ValueOf(s)
+    if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+        return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
     }
-    return ensureValueString(v)
+    stringSlice := make([]string, 0, v.Len())
+
+    for i := 0; i < v.Len(); i++ {
+        stringSlice = append(stringSlice, v.Index(i).String())
+    }
+    return stringSlice, nil
+}
+
+// String method converts interface v to string. If interface is a list, it
+// joins list elements using the seperator. Note that only sep[0] will be used for
+// joining if any separator is specified.
+func String(v interface{}, sep ...string) string {
+    if len(sep) == 0 {
+        return ensureValueString(v)
+    }
+    stringSlice, ok := v.([]string)
+    if ok == false {
+        var err error
+        stringSlice, err = AsStringSlice(v)
+        if err != nil {
+            panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
+        }
+    }
+    return ensureValueString(strings.Join(stringSlice, sep[0]))
 }
 
 // Encode method encodes url path and query parameters.
@@ -153,26 +195,33 @@ func queryEscape(s string) string {
     return url.QueryEscape(s)
 }
 
-// This method is same as Encode() method of "net/url" go package,
-// except it does not encode the query parameters because they
-// already come encoded. It formats values map in query format (bar=foo&a=b).
-func createQuery(v url.Values) string {
-    var buf bytes.Buffer
-    keys := make([]string, 0, len(v))
-    for k := range v {
-        keys = append(keys, k)
-    }
-    sort.Strings(keys)
-    for _, k := range keys {
-        vs := v[k]
-        prefix := url.QueryEscape(k) + "="
-        for _, v := range vs {
-            if buf.Len() > 0 {
-                buf.WriteByte('&')
-            }
-            buf.WriteString(prefix)
-            buf.WriteString(v)
-        }
-    }
-    return buf.String()
-}
+// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
+// This is mainly useful for long-running operations that use the Azure-AsyncOperation
+// header, so we change the initial PUT into a GET to retrieve the final result.
+func ChangeToGet(req *http.Request) *http.Request {
+    req.Method = "GET"
+    req.Body = nil
+    req.ContentLength = 0
+    req.Header.Del("Content-Length")
+    return req
+}
+
+// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
+// interface. If err is a DetailedError it will walk the chain of Original errors.
+func IsTokenRefreshError(err error) bool {
+    if _, ok := err.(adal.TokenRefreshError); ok {
+        return true
+    }
+    if de, ok := err.(DetailedError); ok {
+        return IsTokenRefreshError(de.Original)
+    }
+    return false
+}
+
+// IsTemporaryNetworkError returns true if the specified error is a temporary network error.
+func IsTemporaryNetworkError(err error) bool {
+    if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
+        return true
+    }
+    return false
+}
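A short sketch of the reworked String/AsStringSlice pair, using made-up region names purely as sample data:

```go
package main

import (
    "fmt"

    "github.com/Azure/go-autorest/autorest"
)

func main() {
    // A []string joined with the first separator supplied.
    fmt.Println(autorest.String([]string{"eastus", "westus"}, ",")) // eastus,westus

    // Slices of a named type with a string underlying type are now accepted via reflection.
    type region string
    regions := []region{"northeurope", "westeurope"}
    joined, err := autorest.AsStringSlice(regions)
    if err != nil {
        panic(err)
    }
    fmt.Println(joined)
}
```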
31 vendor/github.com/Azure/go-autorest/autorest/version.go generated vendored
@@ -1,23 +1,20 @@
 package autorest
 
-import (
-    "fmt"
-)
-
-const (
-    major = "7"
-    minor = "3"
-    patch = "0"
-    tag = ""
-    semVerFormat = "%s.%s.%s%s"
-)
-
-var version string
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 // Version returns the semantic version (see http://semver.org).
 func Version() string {
-    if version == "" {
-        version = fmt.Sprintf(semVerFormat, major, minor, patch, tag)
-    }
-    return version
+    return "v10.8.1"
 }
21 vendor/github.com/marstr/guid/LICENSE.txt generated vendored Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2016 Martin Strobel

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
27 vendor/github.com/marstr/guid/README.md generated vendored Normal file
@@ -0,0 +1,27 @@
[![Build Status](https://travis-ci.org/marstr/guid.svg?branch=master)](https://travis-ci.org/marstr/guid)
[![GoDoc](https://godoc.org/github.com/marstr/guid?status.svg)](https://godoc.org/github.com/marstr/guid)
[![Go Report Card](https://goreportcard.com/badge/github.com/marstr/guid)](https://goreportcard.com/report/github.com/marstr/guid)

# Guid
Globally unique identifiers offer a quick means of generating non-colliding values across a distributed system. For this implemenation, [RFC 4122](http://ietf.org/rfc/rfc4122.txt) governs the desired behavior.

## What's in a name?
You have likely already noticed that RFC and some implementations refer to these structures as UUIDs (Universally Unique Identifiers), where as this project is annotated as GUIDs (Globally Unique Identifiers). The name Guid was selected to make clear this project's ties to the [.NET struct Guid.](https://msdn.microsoft.com/en-us/library/system.guid(v=vs.110).aspx) The most obvious relationship is the desire to have the same format specifiers available in this library's Format and Parse methods as .NET would have in its ToString and Parse methods.

# Installation
- Ensure you have the [Go Programming Language](https://golang.org/) installed on your system.
- Run the command: `go get -u github.com/marstr/guid`

# Contribution
Contributions are welcome! Feel free to send Pull Requests. Continuous Integration will ensure that you have conformed to Go conventions. Please remember to add tests for your changes.

# Versioning
This library will adhere to the
[Semantic Versioning 2.0.0](http://semver.org/spec/v2.0.0.html) specification. It may be worth noting this should allow for tools like [glide](https://glide.readthedocs.io/en/latest/) to pull in this library with ease.

The Release Notes portion of this file will be updated to reflect the most recent major/minor updates, with the option to tag particular bug-fixes as well. Updates to the Release Notes for patches should be addative, where as major/minor updates should replace the previous version. If one desires to see the release notes for an older version, checkout that version of code and open this file.

# Release Notes 1.1.*

## v1.1.0
Adding support for JSON marshaling and unmarshaling.
301 vendor/github.com/marstr/guid/guid.go generated vendored Normal file
@@ -0,0 +1,301 @@
package guid

import (
    "bytes"
    "crypto/rand"
    "errors"
    "fmt"
    "net"
    "strings"
    "sync"
    "time"
)

// GUID is a unique identifier designed to virtually guarantee non-conflict between values generated
// across a distributed system.
type GUID struct {
    timeHighAndVersion      uint16
    timeMid                 uint16
    timeLow                 uint32
    clockSeqHighAndReserved uint8
    clockSeqLow             uint8
    node                    [6]byte
}

// Format enumerates the values that are supported by Parse and Format
type Format string

// These constants define the possible string formats available via this implementation of Guid.
const (
    FormatB       Format = "B" // {00000000-0000-0000-0000-000000000000}
    FormatD       Format = "D" // 00000000-0000-0000-0000-000000000000
    FormatN       Format = "N" // 00000000000000000000000000000000
    FormatP       Format = "P" // (00000000-0000-0000-0000-000000000000)
    FormatX       Format = "X" // {0x00000000,0x0000,0x0000,{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}}
    FormatDefault Format = FormatD
)

// CreationStrategy enumerates the values that are supported for populating the bits of a new Guid.
type CreationStrategy string

// These constants define the possible creation strategies available via this implementation of Guid.
const (
    CreationStrategyVersion1 CreationStrategy = "version1"
    CreationStrategyVersion2 CreationStrategy = "version2"
    CreationStrategyVersion3 CreationStrategy = "version3"
    CreationStrategyVersion4 CreationStrategy = "version4"
    CreationStrategyVersion5 CreationStrategy = "version5"
)

var emptyGUID GUID

// NewGUID generates and returns a new globally unique identifier
func NewGUID() GUID {
    result, err := version4()
    if err != nil {
        panic(err) //Version 4 (pseudo-random GUID) doesn't use anything that could fail.
    }
    return result
}

var knownStrategies = map[CreationStrategy]func() (GUID, error){
    CreationStrategyVersion1: version1,
    CreationStrategyVersion4: version4,
}

// NewGUIDs generates and returns a new globally unique identifier that conforms to the given strategy.
func NewGUIDs(strategy CreationStrategy) (GUID, error) {
    if creator, present := knownStrategies[strategy]; present {
        result, err := creator()
        return result, err
    }
    return emptyGUID, errors.New("Unsupported CreationStrategy")
}

// Empty returns a copy of the default and empty GUID.
func Empty() GUID {
    return emptyGUID
}

var knownFormats = map[Format]string{
    FormatN: "%08x%04x%04x%02x%02x%02x%02x%02x%02x%02x%02x",
    FormatD: "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
    FormatB: "{%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x}",
    FormatP: "(%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x)",
    FormatX: "{0x%08x,0x%04x,0x%04x,{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x}}",
}

// MarshalJSON writes a GUID as a JSON string.
func (guid GUID) MarshalJSON() (marshaled []byte, err error) {
    buf := bytes.Buffer{}

    _, err = buf.WriteRune('"')
    buf.WriteString(guid.String())
    buf.WriteRune('"')

    marshaled = buf.Bytes()
    return
}

// Parse instantiates a GUID from a text representation of the same GUID.
// This is the inverse of function family String()
func Parse(value string) (GUID, error) {
    var guid GUID
    for _, fullFormat := range knownFormats {
        parity, err := fmt.Sscanf(
            value,
            fullFormat,
            &guid.timeLow,
            &guid.timeMid,
            &guid.timeHighAndVersion,
            &guid.clockSeqHighAndReserved,
            &guid.clockSeqLow,
            &guid.node[0],
            &guid.node[1],
            &guid.node[2],
            &guid.node[3],
            &guid.node[4],
            &guid.node[5])
        if parity == 11 && err == nil {
            return guid, err
        }
    }
    return emptyGUID, fmt.Errorf("\"%s\" is not in a recognized format", value)
}

// String returns a text representation of a GUID in the default format.
func (guid GUID) String() string {
    return guid.Stringf(FormatDefault)
}

// Stringf returns a text representation of a GUID that conforms to the specified format.
// If an unrecognized format is provided, the empty string is returned.
func (guid GUID) Stringf(format Format) string {
    if format == "" {
        format = FormatDefault
    }
    fullFormat, present := knownFormats[format]
    if !present {
        return ""
    }
    return fmt.Sprintf(
        fullFormat,
        guid.timeLow,
        guid.timeMid,
        guid.timeHighAndVersion,
        guid.clockSeqHighAndReserved,
        guid.clockSeqLow,
        guid.node[0],
        guid.node[1],
        guid.node[2],
        guid.node[3],
        guid.node[4],
        guid.node[5])
}

// UnmarshalJSON parses a GUID from a JSON string token.
func (guid *GUID) UnmarshalJSON(marshaled []byte) (err error) {
    if len(marshaled) < 2 {
        err = errors.New("JSON GUID must be surrounded by quotes")
        return
    }
    stripped := marshaled[1 : len(marshaled)-1]
    *guid, err = Parse(string(stripped))
    return
}

// Version reads a GUID to parse which mechanism of generating GUIDS was employed.
// Values returned here are documented in rfc4122.txt.
func (guid GUID) Version() uint {
    return uint(guid.timeHighAndVersion >> 12)
}

var unixToGregorianOffset = time.Date(1970, 01, 01, 0, 0, 00, 0, time.UTC).Sub(time.Date(1582, 10, 15, 0, 0, 0, 0, time.UTC))

// getRFC4122Time returns a 60-bit count of 100-nanosecond intervals since 00:00:00.00 October 15th, 1582
func getRFC4122Time() int64 {
    currentTime := time.Now().UTC().Add(unixToGregorianOffset).UnixNano()
    currentTime /= 100
    return currentTime & 0x0FFFFFFFFFFFFFFF
}

var clockSeqVal uint16
var clockSeqKey sync.Mutex

func getClockSequence() (uint16, error) {
    clockSeqKey.Lock()
    defer clockSeqKey.Unlock()

    if 0 == clockSeqVal {
        var temp [2]byte
        if parity, err := rand.Read(temp[:]); !(2 == parity && nil == err) {
            return 0, err
        }
        clockSeqVal = uint16(temp[0])<<8 | uint16(temp[1])
    }
    clockSeqVal++
    return clockSeqVal, nil
}

func getMACAddress() (mac [6]byte, err error) {
    var hostNICs []net.Interface

    hostNICs, err = net.Interfaces()
    if err != nil {
        return
    }

    for _, nic := range hostNICs {
        var parity int

        parity, err = fmt.Sscanf(
            strings.ToLower(nic.HardwareAddr.String()),
            "%02x:%02x:%02x:%02x:%02x:%02x",
            &mac[0],
            &mac[1],
            &mac[2],
            &mac[3],
            &mac[4],
            &mac[5])

        if parity == len(mac) {
            return
        }
    }

    err = fmt.Errorf("No suitable address found")

    return
}

func version1() (result GUID, err error) {
    var localMAC [6]byte
    var clockSeq uint16

    currentTime := getRFC4122Time()

    result.timeLow = uint32(currentTime)
    result.timeMid = uint16(currentTime >> 32)
    result.timeHighAndVersion = uint16(currentTime >> 48)
    if err = result.setVersion(1); err != nil {
        return emptyGUID, err
    }

    if localMAC, err = getMACAddress(); nil != err {
        if parity, err := rand.Read(localMAC[:]); !(len(localMAC) != parity && err == nil) {
            return emptyGUID, err
        }
        localMAC[0] |= 0x1
    }
    copy(result.node[:], localMAC[:])

    if clockSeq, err = getClockSequence(); nil != err {
        return emptyGUID, err
    }

    result.clockSeqLow = uint8(clockSeq)
    result.clockSeqHighAndReserved = uint8(clockSeq >> 8)

    result.setReservedBits()

    return
}

func version4() (GUID, error) {
    var retval GUID
    var bits [10]byte

    if parity, err := rand.Read(bits[:]); !(len(bits) == parity && err == nil) {
        return emptyGUID, err
    }
    retval.timeHighAndVersion |= uint16(bits[0]) | uint16(bits[1])<<8
    retval.timeMid |= uint16(bits[2]) | uint16(bits[3])<<8
    retval.timeLow |= uint32(bits[4]) | uint32(bits[5])<<8 | uint32(bits[6])<<16 | uint32(bits[7])<<24
    retval.clockSeqHighAndReserved = uint8(bits[8])
    retval.clockSeqLow = uint8(bits[9])

    //Randomly set clock-sequence, reserved, and node
    if written, err := rand.Read(retval.node[:]); !(nil == err && written == len(retval.node)) {
        retval = emptyGUID
        return retval, err
    }

    if err := retval.setVersion(4); nil != err {
        return emptyGUID, err
    }
    retval.setReservedBits()

    return retval, nil
}

func (guid *GUID) setVersion(version uint16) error {
    if version > 5 || version == 0 {
        return fmt.Errorf("While setting GUID version, unsupported version: %d", version)
    }
    guid.timeHighAndVersion = (guid.timeHighAndVersion & 0x0fff) | version<<12
    return nil
}

func (guid *GUID) setReservedBits() {
    guid.clockSeqHighAndReserved = (guid.clockSeqHighAndReserved & 0x3f) | 0x80
}
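A small usage sketch of the package above, using only the exported functions shown in guid.go (the printed values are random, so the comments describe shape rather than exact output):

```go
package main

import (
    "fmt"

    "github.com/marstr/guid"
)

func main() {
    g := guid.NewGUID()
    fmt.Println(g.Stringf(guid.FormatB)) // e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}

    // Parse accepts any of the known formats and round-trips back to a GUID value.
    parsed, err := guid.Parse(g.String())
    if err != nil {
        panic(err)
    }
    fmt.Println(parsed.Version()) // 4 for the default creation strategy
}
```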
20 vendor/github.com/satori/go.uuid/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
65 vendor/github.com/satori/go.uuid/README.md generated vendored Normal file
@@ -0,0 +1,65 @@
# UUID package for Go language

[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)

This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs.

With 100% test coverage and benchmarks out of box.

Supported versions:
* Version 1, based on timestamp and MAC address (RFC 4122)
* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
* Version 3, based on MD5 hashing (RFC 4122)
* Version 4, based on random numbers (RFC 4122)
* Version 5, based on SHA-1 hashing (RFC 4122)

## Installation

Use the `go` command:

	$ go get github.com/satori/go.uuid

## Requirements

UUID package requires Go >= 1.2.

## Example

```go
package main

import (
	"fmt"
	"github.com/satori/go.uuid"
)

func main() {
	// Creating UUID Version 4
	u1 := uuid.NewV4()
	fmt.Printf("UUIDv4: %s\n", u1)

	// Parsing UUID from string input
	u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		fmt.Printf("Something gone wrong: %s", err)
	}
	fmt.Printf("Successfully parsed: %s", u2)
}
```

## Documentation

[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.

## Links
* [RFC 4122](http://tools.ietf.org/html/rfc4122)
* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)

## Copyright

Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>.

UUID package released under MIT License.

See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
206 vendor/github.com/satori/go.uuid/codec.go generated vendored Normal file
@@ -0,0 +1,206 @@
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

package uuid

import (
    "bytes"
    "encoding/hex"
    "fmt"
)

// FromBytes returns UUID converted from raw byte slice input.
// It will return error if the slice isn't 16 bytes long.
func FromBytes(input []byte) (u UUID, err error) {
    err = u.UnmarshalBinary(input)
    return
}

// FromBytesOrNil returns UUID converted from raw byte slice input.
// Same behavior as FromBytes, but returns a Nil UUID on error.
func FromBytesOrNil(input []byte) UUID {
    uuid, err := FromBytes(input)
    if err != nil {
        return Nil
    }
    return uuid
}

// FromString returns UUID parsed from string input.
// Input is expected in a form accepted by UnmarshalText.
func FromString(input string) (u UUID, err error) {
    err = u.UnmarshalText([]byte(input))
    return
}

// FromStringOrNil returns UUID parsed from string input.
// Same behavior as FromString, but returns a Nil UUID on error.
func FromStringOrNil(input string) UUID {
    uuid, err := FromString(input)
    if err != nil {
        return Nil
    }
    return uuid
}

// MarshalText implements the encoding.TextMarshaler interface.
// The encoding is the same as returned by String.
func (u UUID) MarshalText() (text []byte, err error) {
    text = []byte(u.String())
    return
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Following formats are supported:
//   "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
//   "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
//   "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
//   "6ba7b8109dad11d180b400c04fd430c8"
// ABNF for supported UUID text representation follows:
//   uuid := canonical | hashlike | braced | urn
//   plain := canonical | hashlike
//   canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
//   hashlike := 12hexoct
//   braced := '{' plain '}'
//   urn := URN ':' UUID-NID ':' plain
//   URN := 'urn'
//   UUID-NID := 'uuid'
//   12hexoct := 6hexoct 6hexoct
//   6hexoct := 4hexoct 2hexoct
//   4hexoct := 2hexoct 2hexoct
//   2hexoct := hexoct hexoct
//   hexoct := hexdig hexdig
//   hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
//             'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
//             'A' | 'B' | 'C' | 'D' | 'E' | 'F'
func (u *UUID) UnmarshalText(text []byte) (err error) {
    switch len(text) {
    case 32:
        return u.decodeHashLike(text)
    case 36:
        return u.decodeCanonical(text)
    case 38:
        return u.decodeBraced(text)
    case 41:
        fallthrough
    case 45:
        return u.decodeURN(text)
    default:
        return fmt.Errorf("uuid: incorrect UUID length: %s", text)
    }
}

// decodeCanonical decodes UUID string in format
// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
func (u *UUID) decodeCanonical(t []byte) (err error) {
    if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
        return fmt.Errorf("uuid: incorrect UUID format %s", t)
    }

    src := t[:]
    dst := u[:]

    for i, byteGroup := range byteGroups {
        if i > 0 {
            src = src[1:] // skip dash
        }
        _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup])
        if err != nil {
            return
        }
        src = src[byteGroup:]
        dst = dst[byteGroup/2:]
    }

    return
}

// decodeHashLike decodes UUID string in format
// "6ba7b8109dad11d180b400c04fd430c8".
func (u *UUID) decodeHashLike(t []byte) (err error) {
    src := t[:]
    dst := u[:]

    if _, err = hex.Decode(dst, src); err != nil {
        return err
    }
    return
}

// decodeBraced decodes UUID string in format
// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format
// "{6ba7b8109dad11d180b400c04fd430c8}".
func (u *UUID) decodeBraced(t []byte) (err error) {
    l := len(t)

    if t[0] != '{' || t[l-1] != '}' {
        return fmt.Errorf("uuid: incorrect UUID format %s", t)
    }

    return u.decodePlain(t[1 : l-1])
}

// decodeURN decodes UUID string in format
// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format
// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
func (u *UUID) decodeURN(t []byte) (err error) {
    total := len(t)

    urn_uuid_prefix := t[:9]

    if !bytes.Equal(urn_uuid_prefix, urnPrefix) {
        return fmt.Errorf("uuid: incorrect UUID format: %s", t)
    }

    return u.decodePlain(t[9:total])
}

// decodePlain decodes UUID string in canonical format
// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
// "6ba7b8109dad11d180b400c04fd430c8".
func (u *UUID) decodePlain(t []byte) (err error) {
    switch len(t) {
    case 32:
        return u.decodeHashLike(t)
    case 36:
        return u.decodeCanonical(t)
    default:
        return fmt.Errorf("uuid: incorrrect UUID length: %s", t)
    }
}

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (u UUID) MarshalBinary() (data []byte, err error) {
    data = u.Bytes()
    return
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
// It will return error if the slice isn't 16 bytes long.
func (u *UUID) UnmarshalBinary(data []byte) (err error) {
    if len(data) != Size {
        err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
        return
    }
    copy(u[:], data)

    return
}
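A brief sketch of the text formats UnmarshalText documents above, exercised through FromString (the sample inputs are the RFC 4122 example namespace UUID, which is also used in the package's own docs):

```go
package main

import (
    "fmt"

    "github.com/satori/go.uuid"
)

func main() {
    // Canonical, braced, URN and hash-like forms all decode to the same UUID value.
    inputs := []string{
        "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
        "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
        "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
        "6ba7b8109dad11d180b400c04fd430c8",
    }
    for _, in := range inputs {
        u, err := uuid.FromString(in)
        fmt.Println(u, err)
    }
}
```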
239 vendor/github.com/satori/go.uuid/generator.go generated vendored Normal file
@@ -0,0 +1,239 @@
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

package uuid

import (
    "crypto/md5"
    "crypto/rand"
    "crypto/sha1"
    "encoding/binary"
    "hash"
    "net"
    "os"
    "sync"
    "time"
)

// Difference in 100-nanosecond intervals between
// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
const epochStart = 122192928000000000

var (
    global = newDefaultGenerator()

    epochFunc = unixTimeFunc
    posixUID  = uint32(os.Getuid())
    posixGID  = uint32(os.Getgid())
)

// NewV1 returns UUID based on current timestamp and MAC address.
func NewV1() UUID {
    return global.NewV1()
}

// NewV2 returns DCE Security UUID based on POSIX UID/GID.
func NewV2(domain byte) UUID {
    return global.NewV2(domain)
}

// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
func NewV3(ns UUID, name string) UUID {
    return global.NewV3(ns, name)
}

// NewV4 returns random generated UUID.
func NewV4() UUID {
    return global.NewV4()
}

// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
func NewV5(ns UUID, name string) UUID {
    return global.NewV5(ns, name)
}

// Generator provides interface for generating UUIDs.
type Generator interface {
    NewV1() UUID
    NewV2(domain byte) UUID
    NewV3(ns UUID, name string) UUID
    NewV4() UUID
    NewV5(ns UUID, name string) UUID
}

// Default generator implementation.
type generator struct {
    storageOnce  sync.Once
    storageMutex sync.Mutex

    lastTime      uint64
    clockSequence uint16
    hardwareAddr  [6]byte
}

func newDefaultGenerator() Generator {
    return &generator{}
}

// NewV1 returns UUID based on current timestamp and MAC address.
func (g *generator) NewV1() UUID {
    u := UUID{}

    timeNow, clockSeq, hardwareAddr := g.getStorage()

    binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
    binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
    binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
    binary.BigEndian.PutUint16(u[8:], clockSeq)

    copy(u[10:], hardwareAddr)

    u.SetVersion(V1)
    u.SetVariant(VariantRFC4122)

    return u
}

// NewV2 returns DCE Security UUID based on POSIX UID/GID.
func (g *generator) NewV2(domain byte) UUID {
    u := UUID{}

    timeNow, clockSeq, hardwareAddr := g.getStorage()

    switch domain {
    case DomainPerson:
        binary.BigEndian.PutUint32(u[0:], posixUID)
    case DomainGroup:
        binary.BigEndian.PutUint32(u[0:], posixGID)
    }

    binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
    binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
    binary.BigEndian.PutUint16(u[8:], clockSeq)
    u[9] = domain

    copy(u[10:], hardwareAddr)

    u.SetVersion(V2)
    u.SetVariant(VariantRFC4122)

    return u
}

// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
func (g *generator) NewV3(ns UUID, name string) UUID {
    u := newFromHash(md5.New(), ns, name)
    u.SetVersion(V3)
    u.SetVariant(VariantRFC4122)

    return u
}

// NewV4 returns random generated UUID.
func (g *generator) NewV4() UUID {
    u := UUID{}
    g.safeRandom(u[:])
    u.SetVersion(V4)
    u.SetVariant(VariantRFC4122)

    return u
}

// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
func (g *generator) NewV5(ns UUID, name string) UUID {
    u := newFromHash(sha1.New(), ns, name)
    u.SetVersion(V5)
    u.SetVariant(VariantRFC4122)

    return u
}

func (g *generator) initStorage() {
    g.initClockSequence()
    g.initHardwareAddr()
}

func (g *generator) initClockSequence() {
    buf := make([]byte, 2)
    g.safeRandom(buf)
    g.clockSequence = binary.BigEndian.Uint16(buf)
}

func (g *generator) initHardwareAddr() {
    interfaces, err := net.Interfaces()
    if err == nil {
        for _, iface := range interfaces {
            if len(iface.HardwareAddr) >= 6 {
                copy(g.hardwareAddr[:], iface.HardwareAddr)
                return
            }
        }
    }

    // Initialize hardwareAddr randomly in case
    // of real network interfaces absence
    g.safeRandom(g.hardwareAddr[:])

    // Set multicast bit as recommended in RFC 4122
    g.hardwareAddr[0] |= 0x01
}

func (g *generator) safeRandom(dest []byte) {
    if _, err := rand.Read(dest); err != nil {
        panic(err)
    }
}

// Returns UUID v1/v2 storage state.
// Returns epoch timestamp, clock sequence, and hardware address.
func (g *generator) getStorage() (uint64, uint16, []byte) {
    g.storageOnce.Do(g.initStorage)

    g.storageMutex.Lock()
    defer g.storageMutex.Unlock()

    timeNow := epochFunc()
    // Clock changed backwards since last UUID generation.
    // Should increase clock sequence.
    if timeNow <= g.lastTime {
        g.clockSequence++
    }
    g.lastTime = timeNow

    return timeNow, g.clockSequence, g.hardwareAddr[:]
}

// Returns difference in 100-nanosecond intervals between
// UUID epoch (October 15, 1582) and current time.
// This is default epoch calculation function.
func unixTimeFunc() uint64 {
    return epochStart + uint64(time.Now().UnixNano()/100)
}

// Returns UUID based on hashing of namespace UUID and name.
func newFromHash(h hash.Hash, ns UUID, name string) UUID {
    u := UUID{}
    h.Write(ns[:])
    h.Write([]byte(name))
    copy(u[:], h.Sum(nil))

    return u
}
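The practical difference between the version constructors above is randomness versus determinism; a minimal sketch (the example name/URL is invented):

```go
package main

import (
    "fmt"

    "github.com/satori/go.uuid"
)

func main() {
    // V4 is random; every call yields a different value.
    fmt.Println(uuid.NewV4())

    // V5 is a SHA-1 hash of namespace + name, so the same inputs always
    // produce the same UUID, which is handy for stable IDs derived from names.
    fmt.Println(uuid.NewV5(uuid.NamespaceURL, "https://example.invalid/objects/42"))
    fmt.Println(uuid.NewV5(uuid.NamespaceURL, "https://example.invalid/objects/42")) // identical to the line above
}
```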
78
vendor/github.com/satori/go.uuid/sql.go
generated
vendored
Normal file
78
vendor/github.com/satori/go.uuid/sql.go
generated
vendored
Normal file
|
@ -0,0 +1,78 @@
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

package uuid

import (
	"database/sql/driver"
	"fmt"
)

// Value implements the driver.Valuer interface.
func (u UUID) Value() (driver.Value, error) {
	return u.String(), nil
}

// Scan implements the sql.Scanner interface.
// A 16-byte slice is handled by UnmarshalBinary, while
// a longer byte slice or a string is handled by UnmarshalText.
func (u *UUID) Scan(src interface{}) error {
	switch src := src.(type) {
	case []byte:
		if len(src) == Size {
			return u.UnmarshalBinary(src)
		}
		return u.UnmarshalText(src)

	case string:
		return u.UnmarshalText([]byte(src))
	}

	return fmt.Errorf("uuid: cannot convert %T to UUID", src)
}

// NullUUID can be used with the standard sql package to represent a
// UUID value that can be NULL in the database
type NullUUID struct {
	UUID  UUID
	Valid bool
}

// Value implements the driver.Valuer interface.
func (u NullUUID) Value() (driver.Value, error) {
	if !u.Valid {
		return nil, nil
	}
	// Delegate to UUID Value function
	return u.UUID.Value()
}

// Scan implements the sql.Scanner interface.
func (u *NullUUID) Scan(src interface{}) error {
	if src == nil {
		u.UUID, u.Valid = Nil, false
		return nil
	}

	// Delegate to UUID Scan function
	u.Valid = true
	return u.UUID.Scan(src)
}
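Because Scan and Value only delegate to the codec helpers (UnmarshalBinary/UnmarshalText, defined elsewhere in this package) and to the Nil sentinel, their behaviour can be exercised without a real database. A small usage sketch against the vendored import path; wiring it to an actual column is left to whichever database/sql driver the caller uses:

package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	// A string (or a byte slice longer than 16 bytes) is parsed as text ...
	var id uuid.UUID
	if err := id.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}
	// ... while an exactly 16-byte slice is taken as the raw binary form.
	var raw uuid.UUID
	if err := raw.Scan(id.Bytes()); err != nil {
		panic(err)
	}
	fmt.Println(uuid.Equal(raw, id)) // true

	// Value renders the canonical string, which database/sql drivers accept.
	v, _ := id.Value()
	fmt.Println(v) // 6ba7b810-9dad-11d1-80b4-00c04fd430c8

	// NullUUID maps SQL NULL to Valid == false instead of an error.
	var n uuid.NullUUID
	_ = n.Scan(nil)
	fmt.Println(n.Valid) // false
	_ = n.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	fmt.Println(n.Valid, n.UUID) // true, followed by the parsed UUID
}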
161 vendor/github.com/satori/go.uuid/uuid.go generated vendored Normal file
@ -0,0 +1,161 @@
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

// Package uuid provides implementation of Universally Unique Identifier (UUID).
// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
// version 2 (as specified in DCE 1.1).
package uuid

import (
	"bytes"
	"encoding/hex"
)

// Size of a UUID in bytes.
const Size = 16

// UUID representation compliant with specification
// described in RFC 4122.
type UUID [Size]byte

// UUID versions
const (
	_ byte = iota
	V1
	V2
	V3
	V4
	V5
)

// UUID layout variants.
const (
	VariantNCS byte = iota
	VariantRFC4122
	VariantMicrosoft
	VariantFuture
)

// UUID DCE domains.
const (
	DomainPerson = iota
	DomainGroup
	DomainOrg
)

// String parse helpers.
var (
	urnPrefix  = []byte("urn:uuid:")
	byteGroups = []int{8, 4, 4, 4, 12}
)

// Nil is special form of UUID that is specified to have all
// 128 bits set to zero.
var Nil = UUID{}

// Predefined namespace UUIDs.
var (
	NamespaceDNS  = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
	NamespaceURL  = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
	NamespaceOID  = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
	NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
)

// Equal returns true if u1 and u2 equals, otherwise returns false.
func Equal(u1 UUID, u2 UUID) bool {
	return bytes.Equal(u1[:], u2[:])
}

// Version returns algorithm version used to generate UUID.
func (u UUID) Version() byte {
	return u[6] >> 4
}

// Variant returns UUID layout variant.
func (u UUID) Variant() byte {
	switch {
	case (u[8] >> 7) == 0x00:
		return VariantNCS
	case (u[8] >> 6) == 0x02:
		return VariantRFC4122
	case (u[8] >> 5) == 0x06:
		return VariantMicrosoft
	case (u[8] >> 5) == 0x07:
		fallthrough
	default:
		return VariantFuture
	}
}

// Bytes returns bytes slice representation of UUID.
func (u UUID) Bytes() []byte {
	return u[:]
}

// Returns canonical string representation of UUID:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
func (u UUID) String() string {
	buf := make([]byte, 36)

	hex.Encode(buf[0:8], u[0:4])
	buf[8] = '-'
	hex.Encode(buf[9:13], u[4:6])
	buf[13] = '-'
	hex.Encode(buf[14:18], u[6:8])
	buf[18] = '-'
	hex.Encode(buf[19:23], u[8:10])
	buf[23] = '-'
	hex.Encode(buf[24:], u[10:])

	return string(buf)
}

// SetVersion sets version bits.
func (u *UUID) SetVersion(v byte) {
	u[6] = (u[6] & 0x0f) | (v << 4)
}

// SetVariant sets variant bits.
func (u *UUID) SetVariant(v byte) {
	switch v {
	case VariantNCS:
		u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
	case VariantRFC4122:
		u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
	case VariantMicrosoft:
		u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
	case VariantFuture:
		fallthrough
	default:
		u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
	}
}

// Must is a helper that wraps a call to a function returning (UUID, error)
// and panics if the error is non-nil. It is intended for use in variable
// initializations such as
//	var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"));
func Must(u UUID, err error) UUID {
	if err != nil {
		panic(err)
	}
	return u
}
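Taken together, Must, FromString (from this package's codec file), and the accessors above give exactly the pattern the doc comment on Must describes. A short usage sketch against the vendored import path; the variable name reviewNamespace and the chosen literal are illustrative only:

package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

// Must makes package-level initialisation safe to write in one line:
// it panics at startup if the literal is malformed.
var reviewNamespace = uuid.Must(uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))

func main() {
	fmt.Println(reviewNamespace.String())  // canonical 8-4-4-4-12 form
	fmt.Println(reviewNamespace.Version()) // 1: the DNS namespace is a time-based UUID
	fmt.Println(reviewNamespace.Variant() == uuid.VariantRFC4122) // true

	// Nil is the all-zero UUID; Equal compares raw bytes.
	fmt.Println(uuid.Equal(reviewNamespace, uuid.Nil)) // false
}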