Update vendored azure-sdk-for-go

Update to a recent version of the Azure Storage SDK so that we can
fix some memory leaks by plugging in a configurable HTTP client, a
capability added by recent upstream patches.

Signed-off-by: Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Ahmet Alp Balkan 2016-10-28 15:46:05 -07:00
parent 62e88f0fe7
commit 2ab25288a2
GPG key ID: 0E1B7FD112EEE4A0
10 changed files with 1241 additions and 40 deletions
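
For context, the capability this update enables is the new HTTPClient field on storage.Client (see the Client struct changes further down in this diff): when it is nil, http.DefaultClient is used, and a caller can supply its own client to bound connection reuse and set timeouts. A minimal sketch of how a caller might use it; the account credentials and transport settings are illustrative assumptions, not part of this commit.

package main

import (
	"log"
	"net"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder account name and (base64-encoded) key.
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	// HTTPClient is the field added in this update; when nil, http.DefaultClient
	// is used. Supplying our own client lets callers bound connection reuse and
	// set timeouts (the values below are illustrative).
	cli.HTTPClient = &http.Client{
		Timeout: 5 * time.Minute,
		Transport: &http.Transport{
			Dial:                (&net.Dialer{Timeout: 30 * time.Second}).Dial,
			MaxIdleConnsPerHost: 32,
		},
	}
	_ = cli // hand cli to the code that talks to Azure Storage
}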

Godeps/Godeps.json generated

@ -8,8 +8,8 @@
"Deps": [
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v1.2-334-g95361a2",
"Rev": "95361a2573b1fa92a00c5fc2707a80308483c6f9"
"Comment": "v5.0.0-beta-6-g0b5fe2a",
"Rev": "0b5fe2abe0271ba07049eacaa65922d67c319543"
},
{
"ImportPath": "github.com/Sirupsen/logrus",


@ -116,7 +116,7 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil {
if _, err := d.client.DeleteBlobIfExists(d.container, path, nil); err != nil {
return err
}
writer, err := d.Writer(ctx, path, false)
@ -151,7 +151,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
}
bytesRange := fmt.Sprintf("%v-", offset)
resp, err := d.client.GetBlobRange(d.container, path, bytesRange)
resp, err := d.client.GetBlobRange(d.container, path, bytesRange, nil)
if err != nil {
return nil, err
}
@ -174,7 +174,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged
}
size = blobProperties.ContentLength
} else {
err := d.client.DeleteBlob(d.container, path)
err := d.client.DeleteBlob(d.container, path, nil)
if err != nil {
return nil, err
}
@ -272,12 +272,12 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
return err
}
return d.client.DeleteBlob(d.container, sourcePath)
return d.client.DeleteBlob(d.container, sourcePath, nil)
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
ok, err := d.client.DeleteBlobIfExists(d.container, path)
ok, err := d.client.DeleteBlobIfExists(d.container, path, nil)
if err != nil {
return err
}
@ -292,7 +292,7 @@ func (d *driver) Delete(ctx context.Context, path string) error {
}
for _, b := range blobs {
if err = d.client.DeleteBlob(d.container, b); err != nil {
if err = d.client.DeleteBlob(d.container, b, nil); err != nil {
return err
}
}
@ -442,7 +442,7 @@ func (w *writer) Cancel() error {
return fmt.Errorf("already committed")
}
w.cancelled = true
return w.driver.client.DeleteBlob(w.driver.container, w.path)
return w.driver.client.DeleteBlob(w.driver.container, w.path, nil)
}
func (w *writer) Commit() error {
@ -470,7 +470,7 @@ func (bw *blockWriter) Write(p []byte) (int, error) {
if offset+chunkSize > len(p) {
chunkSize = len(p) - offset
}
err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize])
err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize], nil)
if err != nil {
return n, err
}


@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2016 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -0,0 +1,5 @@
# Azure Storage SDK for Go
The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations against the Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use the [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.
This package includes support for the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/).
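
A minimal usage sketch (editorial addition, not part of the upstream README). It assumes GetBlobService() as the accessor for the blob client, mirroring the GetTableService/GetFileService accessors added in this version, and uses placeholder credentials.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder account name and (base64-encoded) key.
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	blobCli := cli.GetBlobService() // assumed accessor for BlobStorageClient
	exists, err := blobCli.BlobExists("mycontainer", "myblob")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("blob exists:", exists)
}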


@ -55,7 +55,51 @@ type ContainerListResponse struct {
type Blob struct {
Name string `xml:"Name"`
Properties BlobProperties `xml:"Properties"`
// TODO (ahmetalpbalkan) Metadata
Metadata BlobMetadata `xml:"Metadata"`
}
// BlobMetadata is a set of custom name/value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
type BlobMetadata map[string]string
type blobMetadataEntries struct {
Entries []blobMetadataEntry `xml:",any"`
}
type blobMetadataEntry struct {
XMLName xml.Name
Value string `xml:",chardata"`
}
// UnmarshalXML converts the xml:Metadata element into a Metadata map
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var entries blobMetadataEntries
if err := d.DecodeElement(&entries, &start); err != nil {
return err
}
for _, entry := range entries.Entries {
if *bm == nil {
*bm = make(BlobMetadata)
}
(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
}
return nil
}
// MarshalXML implements the xml.Marshaler interface. It encodes
// metadata name/value pairs as they would appear in an Azure
// ListBlobs response.
func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
entries := make([]blobMetadataEntry, 0, len(bm))
for k, v := range bm {
entries = append(entries, blobMetadataEntry{
XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
Value: v,
})
}
return enc.EncodeElement(blobMetadataEntries{
Entries: entries,
}, start)
}
// BlobProperties contains various properties of a blob
@ -67,6 +111,8 @@ type BlobProperties struct {
ContentLength int64 `xml:"Content-Length"`
ContentType string `xml:"Content-Type"`
ContentEncoding string `xml:"Content-Encoding"`
CacheControl string `xml:"Cache-Control"`
ContentLanguage string `xml:"Content-Language"`
BlobType BlobType `xml:"x-ms-blob-blob-type"`
SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
CopyID string `xml:"CopyId"`
@ -75,6 +121,17 @@ type BlobProperties struct {
CopyProgress string `xml:"CopyProgress"`
CopyCompletionTime string `xml:"CopyCompletionTime"`
CopyStatusDescription string `xml:"CopyStatusDescription"`
LeaseStatus string `xml:"LeaseStatus"`
}
// BlobHeaders contains various properties of a blob and is passed as
// the argument to SetBlobProperties
type BlobHeaders struct {
ContentMD5 string `header:"x-ms-blob-content-md5"`
ContentLanguage string `header:"x-ms-blob-content-language"`
ContentEncoding string `header:"x-ms-blob-content-encoding"`
ContentType string `header:"x-ms-blob-content-type"`
CacheControl string `header:"x-ms-blob-cache-control"`
}
// BlobListResponse contains the response fields from ListBlobs call.
@ -88,6 +145,16 @@ type BlobListResponse struct {
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Blobs []Blob `xml:"Blobs>Blob"`
// BlobPrefixes is used to traverse blobs as if they were a file system.
// It is returned if ListBlobsParameters.Delimiter is specified.
// The list here can be thought of as "folders" that may contain
// other folders or blobs.
BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
// Delimiter is used to traverse blobs as if it were a file system.
// It is returned if ListBlobsParameters.Delimiter is specified.
Delimiter string `xml:"Delimiter"`
}
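
Editorial usage sketch (not part of this diff) for the new BlobPrefixes/Delimiter fields: listing a single "directory" level. The Delimiter parameter is referenced above; the Prefix field, the GetBlobService accessor and the names used are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, _ := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	blobCli := cli.GetBlobService()                           // assumed accessor
	resp, err := blobCli.ListBlobs("mycontainer", storage.ListBlobsParameters{
		Prefix:    "photos/2016/", // assumed blob layout
		Delimiter: "/",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.BlobPrefixes {
		fmt.Println("folder:", p) // "subfolders" under the prefix
	}
	for _, b := range resp.Blobs {
		fmt.Println("blob:", b.Name) // blobs directly under the prefix
	}
}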
// ListContainersParameters defines the set of customizable parameters to make a
@ -189,6 +256,23 @@ const (
blobCopyStatusFailed = "failed"
)
// lease constants.
const (
leaseHeaderPrefix = "x-ms-lease-"
leaseID = "x-ms-lease-id"
leaseAction = "x-ms-lease-action"
leaseBreakPeriod = "x-ms-lease-break-period"
leaseDuration = "x-ms-lease-duration"
leaseProposedID = "x-ms-proposed-lease-id"
leaseTime = "x-ms-lease-time"
acquireLease = "acquire"
renewLease = "renew"
changeLease = "change"
releaseLease = "release"
breakLease = "break"
)
// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
@ -419,7 +503,6 @@ func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameter
func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
verb := "HEAD"
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
headers := b.client.getStandardHeaders()
resp, err := b.client.exec(verb, uri, headers, nil)
if resp != nil {
@ -447,7 +530,7 @@ func (b BlobStorageClient) GetBlobURL(container, name string) string {
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
resp, err := b.getBlobRange(container, name, "")
resp, err := b.getBlobRange(container, name, "", nil)
if err != nil {
return nil, err
}
@ -462,8 +545,8 @@ func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error
// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) {
resp, err := b.getBlobRange(container, name, bytesRange)
func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) {
resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders)
if err != nil {
return nil, err
}
@ -474,7 +557,7 @@ func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.
return resp.body, nil
}
func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) {
func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) {
verb := "GET"
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
@ -482,6 +565,11 @@ func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*st
if bytesRange != "" {
headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
}
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := b.client.exec(verb, uri, headers, nil)
if err != nil {
return nil, err
@ -489,6 +577,134 @@ func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*st
return resp, err
}
// leaseCommonPut is common PUT code for the various acquire/release/break etc. functions.
func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) {
params := url.Values{"comp": {"lease"}}
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
resp, err := b.client.exec("PUT", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil {
return nil, err
}
return resp.headers, nil
}
// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
// It returns the lease ID acquired.
func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) {
headers := b.client.getStandardHeaders()
headers[leaseAction] = acquireLease
headers[leaseProposedID] = proposedLeaseID
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated)
if err != nil {
return "", err
}
returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID))
if returnedLeaseID != "" {
return returnedLeaseID, nil
}
// The service returned HTTP 201 but no lease ID; this should not happen.
return "", errors.New("LeaseID not returned")
}
// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
// Returns the timeout remaining in the lease in seconds
func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) {
headers := b.client.getStandardHeaders()
headers[leaseAction] = breakLease
return b.breakLeaseCommon(container, name, headers)
}
// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
// breakPeriodInSeconds is used to determine how long until a new lease can be created.
// Returns the timeout remaining in the lease in seconds
func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) {
headers := b.client.getStandardHeaders()
headers[leaseAction] = breakLease
headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
return b.breakLeaseCommon(container, name, headers)
}
// breakLeaseCommon is common code for both versions of BreakLease (with and without break period)
func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) {
respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted)
if err != nil {
return 0, err
}
breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
if breakTimeoutStr != "" {
breakTimeout, err = strconv.Atoi(breakTimeoutStr)
if err != nil {
return 0, err
}
}
return breakTimeout, nil
}
// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
// Returns the new LeaseID acquired
func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) {
headers := b.client.getStandardHeaders()
headers[leaseAction] = changeLease
headers[leaseID] = currentLeaseID
headers[leaseProposedID] = proposedLeaseID
respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
if err != nil {
return "", err
}
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID))
if newLeaseID != "" {
return newLeaseID, nil
}
return "", errors.New("LeaseID not returned")
}
// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error {
headers := b.client.getStandardHeaders()
headers[leaseAction] = releaseLease
headers[leaseID] = currentLeaseID
_, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
if err != nil {
return err
}
return nil
}
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error {
headers := b.client.getStandardHeaders()
headers[leaseAction] = renewLease
headers[leaseID] = currentLeaseID
_, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
if err != nil {
return err
}
return nil
}
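
Editorial usage sketch (not part of this diff) tying the new lease API together: acquire, renew, then release. The container/blob names, the proposed lease GUID and the GetBlobService accessor are placeholders/assumptions.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, _ := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	blobCli := cli.GetBlobService()                           // assumed accessor

	// Acquire a 60-second lease, proposing our own lease ID (placeholder GUID).
	leaseID, err := blobCli.AcquireLease("mycontainer", "myblob", 60, "c9f2f9a2-5e74-4a3e-9d4b-1a2b3c4d5e6f")
	if err != nil {
		log.Fatal(err)
	}

	// ... work on the blob while holding the lease, renewing as needed ...
	if err := blobCli.RenewLease("mycontainer", "myblob", leaseID); err != nil {
		log.Fatal(err)
	}

	// Release the lease when done.
	if err := blobCli.ReleaseLease("mycontainer", "myblob", leaseID); err != nil {
		log.Fatal(err)
	}
}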
// GetBlobProperties provides various information about the specified
// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) {
@ -530,6 +746,9 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope
ContentMD5: resp.headers.Get("Content-MD5"),
ContentLength: contentLength,
ContentEncoding: resp.headers.Get("Content-Encoding"),
ContentType: resp.headers.Get("Content-Type"),
CacheControl: resp.headers.Get("Cache-Control"),
ContentLanguage: resp.headers.Get("Content-Language"),
SequenceNumber: sequenceNum,
CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"),
CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"),
@ -538,9 +757,38 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope
CopySource: resp.headers.Get("x-ms-copy-source"),
CopyStatus: resp.headers.Get("x-ms-copy-status"),
BlobType: BlobType(resp.headers.Get("x-ms-blob-type")),
LeaseStatus: resp.headers.Get("x-ms-lease-status"),
}, nil
}
// SetBlobProperties replaces the BlobHeaders for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx
func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error {
params := url.Values{"comp": {"properties"}}
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
headers := b.client.getStandardHeaders()
extraHeaders := headersFromStruct(blobHeaders)
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := b.client.exec("PUT", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusOK})
}
// SetBlobMetadata replaces the metadata for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
@ -549,7 +797,7 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string) error {
func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error {
params := url.Values{"comp": {"metadata"}}
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
headers := b.client.getStandardHeaders()
@ -557,6 +805,10 @@ func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[
headers[userDefinedMetadataHeaderPrefix+k] = v
}
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := b.client.exec("PUT", uri, headers, nil)
if err != nil {
return err
@ -749,14 +1001,16 @@ func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extra
// with 512-byte boundaries and the chunk size must be a multiple of 512.
//
// See https://msdn.microsoft.com/en-us/library/ee691975.aspx
func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error {
func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error {
path := fmt.Sprintf("%s/%s", container, name)
uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}})
headers := b.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypePage)
headers["x-ms-page-write"] = string(writeType)
headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte)
for k, v := range extraHeaders {
headers[k] = v
}
var contentLength int64
var data io.Reader
if writeType == PageWriteTypeClear {
@ -825,13 +1079,17 @@ func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders ma
// AppendBlock appends a block to an append blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx
func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte) error {
func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error {
path := fmt.Sprintf("%s/%s", container, name)
uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}})
headers := b.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypeAppend)
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk))
if err != nil {
return err
@ -908,8 +1166,8 @@ func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error
// DeleteBlob deletes the given blob from the specified container.
// If the blob does not exist at the time of the Delete Blob operation, it
// returns an error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
func (b BlobStorageClient) DeleteBlob(container, name string) error {
resp, err := b.deleteBlob(container, name)
func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error {
resp, err := b.deleteBlob(container, name, extraHeaders)
if err != nil {
return err
}
@ -921,19 +1179,24 @@ func (b BlobStorageClient) DeleteBlob(container, name string) error {
// blob is deleted with this call, returns true. Otherwise returns false.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) {
resp, err := b.deleteBlob(container, name)
if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) {
return resp.statusCode == http.StatusAccepted, nil
func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) {
resp, err := b.deleteBlob(container, name, extraHeaders)
if resp != nil {
defer resp.body.Close()
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusAccepted, nil
}
}
defer resp.body.Close()
return false, err
}
func (b BlobStorageClient) deleteBlob(container, name string) (*storageResponse, error) {
func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) {
verb := "DELETE"
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
headers := b.client.getStandardHeaders()
for k, v := range extraHeaders {
headers[k] = v
}
return b.client.exec(verb, uri, headers, nil)
}
@ -959,10 +1222,25 @@ func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Tim
blobURL = b.GetBlobURL(container, name)
)
canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL)
if err != nil {
return "", err
}
signedExpiry := expiry.Format(time.RFC3339)
// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
// later, the storage account name, and the resource name, and must be URL-decoded.
// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
if err != nil {
return "", err
}
signedExpiry := expiry.UTC().Format(time.RFC3339)
signedResource := "b"
stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions)


@ -4,6 +4,7 @@ package storage
import (
"bytes"
"encoding/base64"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
@ -28,15 +29,29 @@ const (
defaultUseHTTPS = true
// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
StorageEmulatorAccountName = "devstoreaccount1"
// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
blobServiceName = "blob"
tableServiceName = "table"
queueServiceName = "queue"
fileServiceName = "file"
storageEmulatorBlob = "127.0.0.1:10000"
storageEmulatorTable = "127.0.0.1:10002"
storageEmulatorQueue = "127.0.0.1:10001"
)
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
// HTTPClient is the http.Client used to initiate API
// requests. If it is nil, http.DefaultClient is used.
HTTPClient *http.Client
accountName string
accountKey []byte
useHTTPS bool
@ -50,6 +65,11 @@ type storageResponse struct {
body io.ReadCloser
}
type odataResponse struct {
storageResponse
odata odataErrorMessage
}
// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
@ -64,6 +84,20 @@ type AzureStorageServiceError struct {
RequestID string
}
type odataErrorMessageMessage struct {
Lang string `json:"lang"`
Value string `json:"value"`
}
type odataErrorMessageInternal struct {
Code string `json:"code"`
Message odataErrorMessageMessage `json:"message"`
}
type odataErrorMessage struct {
Err odataErrorMessageInternal `json:"odata.error"`
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
@ -90,9 +124,18 @@ func (e UnexpectedStatusCodeError) Got() int {
// NewBasicClient constructs a Client with the given storage account name
// and key.
func NewBasicClient(accountName, accountKey string) (Client, error) {
if accountName == StorageEmulatorAccountName {
return NewEmulatorClient()
}
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}
// NewEmulatorClient constructs a Client intended to work only with the
// Azure Storage Emulator
func NewEmulatorClient() (Client, error) {
return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}
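
Editorial sketch (not part of this diff): both construction paths end up at the local emulator endpoints when the well-known emulator account is used.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Explicit emulator client...
	cli, err := storage.NewEmulatorClient()
	if err != nil {
		log.Fatal(err)
	}
	_ = cli

	// ...or equivalently via NewBasicClient, which detects the emulator
	// account name and delegates to NewEmulatorClient.
	cli2, err := storage.NewBasicClient(storage.StorageEmulatorAccountName, storage.StorageEmulatorAccountKey)
	if err != nil {
		log.Fatal(err)
	}
	_ = cli2
}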
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a
// storage endpoint other than the Azure Public Cloud.
@ -108,7 +151,7 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u
key, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
return c, err
return c, fmt.Errorf("azure: malformed storage account key: %v", err)
}
return Client{
@ -125,8 +168,19 @@ func (c Client) getBaseURL(service string) string {
if c.useHTTPS {
scheme = "https"
}
host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
host := ""
if c.accountName == StorageEmulatorAccountName {
switch service {
case blobServiceName:
host = storageEmulatorBlob
case tableServiceName:
host = storageEmulatorTable
case queueServiceName:
host = storageEmulatorQueue
}
} else {
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
}
u := &url.URL{
Scheme: scheme,
@ -141,8 +195,13 @@ func (c Client) getEndpoint(service, path string, params url.Values) string {
panic(err)
}
if path == "" {
path = "/" // API doesn't accept path segments not starting with '/'
// API doesn't accept path segments not starting with '/'
if !strings.HasPrefix(path, "/") {
path = fmt.Sprintf("/%v", path)
}
if c.accountName == StorageEmulatorAccountName {
path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
}
u.Path = path
@ -162,6 +221,12 @@ func (c Client) GetQueueService() QueueServiceClient {
return QueueServiceClient{c}
}
// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account.
func (c Client) GetTableService() TableServiceClient {
return TableServiceClient{c}
}
// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account.
func (c Client) GetFileService() FileServiceClient {
@ -170,7 +235,7 @@ func (c Client) GetFileService() FileServiceClient {
func (c Client) createAuthorizationHeader(canonicalizedString string) string {
signature := c.computeHmac256(canonicalizedString)
return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature)
return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature)
}
func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) {
@ -190,6 +255,12 @@ func (c Client) getStandardHeaders() map[string]string {
}
}
func (c Client) getCanonicalizedAccountName() string {
// since we may be trying to access a secondary storage account, we need to
// remove the -secondary part of the storage name
return strings.TrimSuffix(c.accountName, "-secondary")
}
func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
cm := make(map[string]string)
@ -224,6 +295,22 @@ func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
return ch
}
func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) {
errMsg := "buildCanonicalizedResourceTable error: %s"
u, err := url.Parse(uri)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
cr := "/" + c.getCanonicalizedAccountName()
if len(u.Path) > 0 {
cr += u.Path
}
return cr, nil
}
func (c Client) buildCanonicalizedResource(uri string) (string, error) {
errMsg := "buildCanonicalizedResource error: %s"
u, err := url.Parse(uri)
@ -231,9 +318,13 @@ func (c Client) buildCanonicalizedResource(uri string) (string, error) {
return "", fmt.Errorf(errMsg, err.Error())
}
cr := "/" + c.accountName
cr := "/" + c.getCanonicalizedAccountName()
if len(u.Path) > 0 {
cr += u.Path
// Any portion of the CanonicalizedResource string that is derived from
// the resource's URI should be encoded exactly as it is in the URI.
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
cr += u.EscapedPath()
}
params, err := url.ParseQuery(u.RawQuery)
@ -262,6 +353,7 @@ func (c Client) buildCanonicalizedResource(uri string) (string, error) {
}
}
}
return cr, nil
}
@ -295,7 +387,6 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
return nil, err
}
headers["Authorization"] = authHeader
if err != nil {
return nil, err
}
@ -318,7 +409,11 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
for k, v := range headers {
req.Header.Add(k, v)
}
httpClient := http.Client{}
httpClient := c.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
@ -356,6 +451,70 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
body: resp.Body}, nil
}
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) {
req, err := http.NewRequest(verb, url, body)
for k, v := range headers {
req.Header.Add(k, v)
}
httpClient := c.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
respToRet := &odataResponse{}
respToRet.body = resp.Body
respToRet.statusCode = resp.StatusCode
respToRet.headers = resp.Header
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readResponseBody(resp)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
// no error in response body
err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode)
return respToRet, err
}
// try unmarshal as odata.error json
err = json.Unmarshal(respBody, &respToRet.odata)
return respToRet, err
}
return respToRet, nil
}
func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) {
can, err := c.buildCanonicalizedResourceTable(url)
if err != nil {
return "", err
}
strToSign := headers["x-ms-date"] + "\n" + can
hmac := c.computeHmac256(strToSign)
return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil
}
func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) {
var err error
headers["Authorization"], err = c.createSharedKeyLite(url, headers)
if err != nil {
return nil, err
}
return c.execInternalJSON(verb, url, headers, body)
}
func readResponseBody(resp *http.Response) ([]byte, error) {
defer resp.Body.Close()
out, err := ioutil.ReadAll(resp.Body)


@ -1,9 +1,11 @@
package storage
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strings"
)
// FileServiceClient contains operations for Microsoft Azure File Service.
@ -11,11 +13,99 @@ type FileServiceClient struct {
client Client
}
// A Share is an entry in ShareListResponse.
type Share struct {
Name string `xml:"Name"`
Properties ShareProperties `xml:"Properties"`
}
// ShareProperties contains various properties of a share returned from
// various endpoints like ListShares.
type ShareProperties struct {
LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"`
Quota string `xml:"Quota"`
}
// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ShareListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Shares []Share `xml:"Shares>Share"`
}
// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ListSharesParameters struct {
Prefix string
Marker string
Include string
MaxResults uint
Timeout uint
}
// ShareHeaders contains various properties of a share and is passed as
// the argument to SetShareProperties
type ShareHeaders struct {
Quota string `header:"x-ms-share-quota"`
}
func (p ListSharesParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != "" {
out.Set("include", p.Include)
}
if p.MaxResults != 0 {
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
// pathForFileShare returns the URL path segment for a File Share resource
func pathForFileShare(name string) string {
return fmt.Sprintf("/%s", name)
}
// ListShares returns the list of shares in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (f FileServiceClient) ListShares(params ListSharesParameters) (ShareListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
uri := f.client.getEndpoint(fileServiceName, "", q)
headers := f.client.getStandardHeaders()
var out ShareListResponse
resp, err := f.client.exec("GET", uri, headers, nil)
if err != nil {
return out, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &out)
return out, err
}
// CreateShare operation creates a new share under the specified account. If the
// share with the same name already exists, the operation fails.
//
@ -29,6 +119,30 @@ func (f FileServiceClient) CreateShare(name string) error {
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// ShareExists returns true if a share with given name exists
// on the storage account, otherwise returns false.
func (f FileServiceClient) ShareExists(name string) (bool, error) {
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
headers := f.client.getStandardHeaders()
resp, err := f.client.exec("HEAD", uri, headers, nil)
if resp != nil {
defer resp.body.Close()
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusOK, nil
}
}
return false, err
}
// GetShareURL gets the canonical URL to the share with the specified name.
// This method does not create a publicly accessible URL if the share is
// private, nor does it check whether the share exists.
func (f FileServiceClient) GetShareURL(name string) string {
return f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{})
}
// CreateShareIfNotExists creates a new share under the specified account if
// it does not exist. Returns true if the share is newly created or false if
// the share already exists.
@ -47,11 +161,68 @@ func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) {
// createShare creates an Azure File Share and returns its response
func (f FileServiceClient) createShare(name string) (*storageResponse, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
headers := f.client.getStandardHeaders()
return f.client.exec("PUT", uri, headers, nil)
}
// GetShareProperties provides various information about the specified
// share. See https://msdn.microsoft.com/en-us/library/azure/dn689099.aspx
func (f FileServiceClient) GetShareProperties(name string) (*ShareProperties, error) {
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
headers := f.client.getStandardHeaders()
resp, err := f.client.exec("HEAD", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
return &ShareProperties{
LastModified: resp.headers.Get("Last-Modified"),
Etag: resp.headers.Get("Etag"),
Quota: resp.headers.Get("x-ms-share-quota"),
}, nil
}
// SetShareProperties replaces the ShareHeaders for the specified share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetShareProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
func (f FileServiceClient) SetShareProperties(name string, shareHeaders ShareHeaders) error {
params := url.Values{}
params.Set("restype", "share")
params.Set("comp", "properties")
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
headers := f.client.getStandardHeaders()
extraHeaders := headersFromStruct(shareHeaders)
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := f.client.exec("PUT", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusOK})
}
// DeleteShare operation marks the specified share for deletion. The share
// and any files contained within it are later deleted during garbage
// collection.
@ -86,6 +257,96 @@ func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) {
// deleteShare makes the call to Delete Share operation endpoint and returns
// the response
func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil)
}
// SetShareMetadata replaces the metadata for the specified Share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (f FileServiceClient) SetShareMetadata(name string, metadata map[string]string, extraHeaders map[string]string) error {
params := url.Values{}
params.Set("restype", "share")
params.Set("comp", "metadata")
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
headers := f.client.getStandardHeaders()
for k, v := range metadata {
headers[userDefinedMetadataHeaderPrefix+k] = v
}
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := f.client.exec("PUT", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusOK})
}
// GetShareMetadata returns all user-defined metadata for the specified share.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (f FileServiceClient) GetShareMetadata(name string) (map[string]string, error) {
params := url.Values{}
params.Set("restype", "share")
params.Set("comp", "metadata")
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
headers := f.client.getStandardHeaders()
resp, err := f.client.exec("GET", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
metadata := make(map[string]string)
for k, v := range resp.headers {
// Can't trust CanonicalHeaderKey() to munge case
// reliably. "_" is allowed in identifiers:
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
// http://tools.ietf.org/html/rfc7230#section-3.2
// ...but "_" is considered invalid by
// CanonicalMIMEHeaderKey in
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
k = strings.ToLower(k)
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
continue
}
// metadata["foo"] = content of the last X-Ms-Meta-Foo header
k = k[len(userDefinedMetadataHeaderPrefix):]
metadata[k] = v[len(v)-1]
}
return metadata, nil
}
// checkForStorageEmulator determines if the client is set up for use with
// the Azure Storage Emulator, and returns a relevant error
func (f FileServiceClient) checkForStorageEmulator() error {
if f.client.accountName == StorageEmulatorAccountName {
return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
}
return nil
}


@ -0,0 +1,129 @@
package storage
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
)
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
client Client
}
// AzureTable is the typedef of the Azure Table name
type AzureTable string
const (
tablesURIPath = "/Tables"
)
type createTableRequest struct {
TableName string `json:"TableName"`
}
func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) }
func (c *TableServiceClient) getStandardHeaders() map[string]string {
return map[string]string{
"x-ms-version": "2015-02-21",
"x-ms-date": currentTimeRfc1123Formatted(),
"Accept": "application/json;odata=nometadata",
"Accept-Charset": "UTF-8",
"Content-Type": "application/json",
}
}
// QueryTables returns the tables created in the
// *TableServiceClient storage account.
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("GET", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
buf := new(bytes.Buffer)
buf.ReadFrom(resp.body)
var respArray queryTablesResponse
if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
return nil, err
}
s := make([]AzureTable, len(respArray.TableName))
for i, elem := range respArray.TableName {
s[i] = AzureTable(elem.TableName)
}
return s, nil
}
// CreateTable creates the table given the specific
// name. This function fails if the name is not compliant
// with the specification or the table already exists.
func (c *TableServiceClient) CreateTable(table AzureTable) error {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
headers := c.getStandardHeaders()
req := createTableRequest{TableName: string(table)}
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(req); err != nil {
return err
}
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
resp, err := c.client.execTable("POST", uri, headers, buf)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
return err
}
return nil
}
// DeleteTable deletes the table given the specific
// name. This function fails if the table is not present.
// Be advised: DeleteTable deletes all the entries
// that may be present.
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
uri += fmt.Sprintf("('%s')", string(table))
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("DELETE", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}


@ -0,0 +1,355 @@
package storage
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
)
const (
partitionKeyNode = "PartitionKey"
rowKeyNode = "RowKey"
tag = "table"
tagIgnore = "-"
continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey"
maxTopParameter = 1000
)
type queryTablesResponse struct {
TableName []struct {
TableName string `json:"TableName"`
} `json:"value"`
}
const (
tableOperationTypeInsert = iota
tableOperationTypeUpdate = iota
tableOperationTypeMerge = iota
tableOperationTypeInsertOrReplace = iota
tableOperationTypeInsertOrMerge = iota
)
type tableOperation int
// TableEntity interface specifies
// the functions needed to support
// marshaling and unmarshaling into
// Azure Tables. The struct must only contain
// simple types because Azure Tables do not
// support hierarchy.
type TableEntity interface {
PartitionKey() string
RowKey() string
SetPartitionKey(string) error
SetRowKey(string) error
}
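
Editorial sketch of a concrete TableEntity implementation (not part of this diff); the entity type, its fields and the table name are made up. Tagging the key fields with json:"-" keeps them out of the serialized body, since PartitionKey and RowKey are injected separately by injectPartitionAndRowKeys further down in this file.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// Customer is a hypothetical entity; only simple types, as required above.
type Customer struct {
	PKey  string `json:"-"`
	RKey  string `json:"-"`
	Email string `json:"Email"`
}

func (c *Customer) PartitionKey() string           { return c.PKey }
func (c *Customer) RowKey() string                 { return c.RKey }
func (c *Customer) SetPartitionKey(s string) error { c.PKey = s; return nil }
func (c *Customer) SetRowKey(s string) error       { c.RKey = s; return nil }

func main() {
	cli, _ := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	tCli := cli.GetTableService()
	table := storage.AzureTable("customers")
	if err := tCli.InsertEntity(table, &Customer{PKey: "p1", RKey: "r1", Email: "a@example.com"}); err != nil {
		log.Fatal(err)
	}
}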
// ContinuationToken is an opaque (ie not useful to inspect)
// struct that Get... methods can return if there are more
// entries to be returned than the ones already
// returned. Just pass it to the same function to continue
// receiving the remaining entries.
type ContinuationToken struct {
NextPartitionKey string
NextRowKey string
}
type getTableEntriesResponse struct {
Elements []map[string]interface{} `json:"value"`
}
// QueryTableEntities queries the specified table and returns the unmarshaled
// entities of type retType.
// The top parameter limits the number of returned entries; the maximum top
// allowed by the Azure API is 1000. If more than top entries are available,
// the function returns a non-nil *ContinuationToken. You can call the
// same function again, passing the received ContinuationToken as the
// previousContToken parameter, in order to get the following entries. The
// query parameter is the OData query; to retrieve all entries, pass the empty string.
// The function returns a pointer to a TableEntity slice, the *ContinuationToken
// if there are more entries to be returned and an error in case something went
// wrong.
//
// Example:
// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "")
func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) {
if top > maxTopParameter {
return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top)
}
uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{})
uri += fmt.Sprintf("?$top=%d", top)
if query != "" {
uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query))
}
if previousContToken != nil {
uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey)
}
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("GET", uri, headers, nil)
if err != nil {
return nil, nil, err
}
contToken := extractContinuationTokenFromHeaders(resp.headers)
if err != nil {
return nil, contToken, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, contToken, err
}
retEntries, err := deserializeEntity(retType, resp.body)
if err != nil {
return nil, contToken, err
}
return retEntries, contToken, nil
}
// InsertEntity inserts an entity in the specified table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, false, "POST"); err != nil {
return checkRespCode(sc, []int{http.StatusCreated})
}
return err
}
func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
if specifyKeysInURL {
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
}
headers := c.getStandardHeaders()
var buf bytes.Buffer
if err := injectPartitionAndRowKeys(entity, &buf); err != nil {
return 0, err
}
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
var err error
var resp *odataResponse
resp, err = c.client.execTable(method, uri, headers, &buf)
if err != nil {
return 0, err
}
defer resp.body.Close()
return resp.statusCode, nil
}
// UpdateEntity updates the contents of an entity with the
// one passed as parameter. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, true, "PUT"); err != nil {
return checkRespCode(sc, []int{http.StatusNoContent})
}
return err
}
// MergeEntity merges the contents of an entity with the
// one passed as parameter.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
return checkRespCode(sc, []int{http.StatusNoContent})
}
return err
}
// DeleteEntityWithoutCheck deletes the entity matching by
// PartitionKey and RowKey. There is no check on IfMatch
// parameter so the entity is always deleted.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error {
return c.DeleteEntity(table, entity, "*")
}
// DeleteEntity deletes the entity matching by
// PartitionKey, RowKey and ifMatch field.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or
// the ifMatch is different.
func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error {
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
headers["If-Match"] = ifMatch
resp, err := c.client.execTable("DELETE", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}
// InsertOrReplaceEntity inserts an entity in the specified table
// or replaces the existing one.
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, true, "PUT"); err != nil {
return checkRespCode(sc, []int{http.StatusNoContent})
}
return err
}
// InsertOrMergeEntity inserts an entity in the specified table
// or merges the existing one.
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
var err error
if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
return checkRespCode(sc, []int{http.StatusNoContent})
}
return err
}
func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
if err := json.NewEncoder(buf).Encode(entity); err != nil {
return err
}
dec := make(map[string]interface{})
if err := json.NewDecoder(buf).Decode(&dec); err != nil {
return err
}
// Inject PartitionKey and RowKey
dec[partitionKeyNode] = entity.PartitionKey()
dec[rowKeyNode] = entity.RowKey()
// Remove tagged fields
// The tag is defined in the const section
// This is useful to avoid storing the PartitionKey and RowKey twice.
numFields := reflect.ValueOf(entity).Elem().NumField()
for i := 0; i < numFields; i++ {
f := reflect.ValueOf(entity).Elem().Type().Field(i)
if f.Tag.Get(tag) == tagIgnore {
// we must look for its JSON name in the dictionary
// as the user can rename it using a tag
jsonName := f.Name
if f.Tag.Get("json") != "" {
jsonName = f.Tag.Get("json")
}
delete(dec, jsonName)
}
}
buf.Reset()
if err := json.NewEncoder(buf).Encode(&dec); err != nil {
return err
}
return nil
}
func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) {
buf := new(bytes.Buffer)
var ret getTableEntriesResponse
if err := json.NewDecoder(reader).Decode(&ret); err != nil {
return nil, err
}
tEntries := make([]TableEntity, len(ret.Elements))
for i, entry := range ret.Elements {
buf.Reset()
if err := json.NewEncoder(buf).Encode(entry); err != nil {
return nil, err
}
dec := make(map[string]interface{})
if err := json.NewDecoder(buf).Decode(&dec); err != nil {
return nil, err
}
var pKey, rKey string
// strip pk and rk
for key, val := range dec {
switch key {
case partitionKeyNode:
pKey = val.(string)
case rowKeyNode:
rKey = val.(string)
}
}
delete(dec, partitionKeyNode)
delete(dec, rowKeyNode)
buf.Reset()
if err := json.NewEncoder(buf).Encode(dec); err != nil {
return nil, err
}
// Create an empty retType instance
tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity)
// Populate it with the values
if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil {
return nil, err
}
// Reset PartitionKey and RowKey
tEntries[i].SetPartitionKey(pKey)
tEntries[i].SetRowKey(rKey)
}
return tEntries, nil
}
func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken {
ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)}
if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
return &ct
}
return nil
}


@ -11,6 +11,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"reflect"
"time"
)
@ -69,3 +70,16 @@ func xmlMarshal(v interface{}) (io.Reader, int, error) {
}
return bytes.NewReader(b), len(b), nil
}
func headersFromStruct(v interface{}) map[string]string {
headers := make(map[string]string)
value := reflect.ValueOf(v)
for i := 0; i < value.NumField(); i++ {
key := value.Type().Field(i).Tag.Get("header")
val := value.Field(i).String()
if val != "" {
headers[key] = val
}
}
return headers
}
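
Editorial sketch (not part of this diff) of how the header-tagged structs flow through headersFromStruct: the non-empty fields of BlobHeaders become x-ms-blob-* request headers in SetBlobProperties. The container/blob names and the GetBlobService accessor are placeholders/assumptions.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, _ := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	blobCli := cli.GetBlobService()                           // assumed accessor

	// Via headersFromStruct, the fields set below are sent as the
	// x-ms-blob-content-type and x-ms-blob-cache-control request headers.
	err := blobCli.SetBlobProperties("mycontainer", "myblob", storage.BlobHeaders{
		ContentType:  "application/octet-stream",
		CacheControl: "max-age=3600",
	})
	if err != nil {
		log.Fatal(err)
	}
}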