2014-11-25 18:40:24 +00:00
|
|
|
// Package azure provides a storagedriver.StorageDriver implementation to
|
|
|
|
// store blobs in Microsoft Azure Blob Storage Service.
|
|
|
|
package azure
|
|
|
|
|
|
|
|
import (
|
2016-02-08 22:29:21 +00:00
|
|
|
"bufio"
|
2014-11-25 18:40:24 +00:00
|
|
|
"bytes"
|
2017-08-11 22:31:16 +00:00
|
|
|
"context"
|
2014-11-25 18:40:24 +00:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"strings"
|
2015-01-16 18:18:42 +00:00
|
|
|
"time"
|
2014-11-25 18:40:24 +00:00
|
|
|
|
2020-08-24 11:18:39 +00:00
|
|
|
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
|
|
|
|
"github.com/distribution/distribution/v3/registry/storage/driver/base"
|
|
|
|
"github.com/distribution/distribution/v3/registry/storage/driver/factory"
|
2014-11-25 18:40:24 +00:00
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
|
|
|
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
|
|
|
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
|
|
|
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
2014-11-25 18:40:24 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// driverName is the name under which this driver registers itself with the
// storagedriver factory.
const driverName = "azure"
|
|
|
|
|
|
|
|
const (
	// maxChunkSize is the buffer size (4 MiB) used by the blob writer; each
	// flush of the buffered writer issues at most one AppendBlock call of
	// this many bytes.
	maxChunkSize = 4 * 1024 * 1024
)
|
|
|
|
|
2015-02-04 07:21:39 +00:00
|
|
|
// driver implements storagedriver.StorageDriver on top of a single Azure
// Blob Storage container. Paths are mapped to blob names under
// rootDirectory (see blobName).
type driver struct {
	// azClient wraps credentials and is used to sign SAS URLs (see URLFor).
	azClient *azureClient
	// client performs all blob operations within the configured container.
	client *container.Client
	// rootDirectory is an optional prefix prepended to every path.
	rootDirectory string
}
|
|
|
|
|
2015-02-04 07:21:39 +00:00
|
|
|
// baseEmbed allows Driver to embed base.Base without exposing the field name.
type baseEmbed struct{ base.Base }
|
|
|
|
|
|
|
|
// Driver is a storagedriver.StorageDriver implementation backed by
// Microsoft Azure Blob Storage Service.
// Construct instances via New or through the storagedriver factory.
type Driver struct{ baseEmbed }
|
|
|
|
|
2014-11-25 18:40:24 +00:00
|
|
|
// init registers the azure driver with the storagedriver factory so it can
// be instantiated by name.
func init() {
	factory.Register(driverName, &azureDriverFactory{})
}
|
|
|
|
|
|
|
|
// azureDriverFactory creates azure driver instances for the factory registry.
type azureDriverFactory struct{}
|
|
|
|
|
2015-01-16 18:18:42 +00:00
|
|
|
func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
|
2020-02-21 03:58:17 +00:00
|
|
|
params, err := NewParameters(parameters)
|
2014-11-25 18:40:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
return New(params)
|
|
|
|
}
|
2014-11-25 18:40:24 +00:00
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
// New constructs a new Driver from parameters
|
|
|
|
func New(params *Parameters) (*Driver, error) {
|
|
|
|
azClient, err := newAzureClient(params)
|
|
|
|
if err != nil {
|
2014-11-25 18:40:24 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
client := azClient.ContainerClient()
|
2015-02-04 07:21:39 +00:00
|
|
|
d := &driver{
|
2020-02-21 03:58:17 +00:00
|
|
|
azClient: azClient,
|
|
|
|
client: client,
|
|
|
|
rootDirectory: params.RootDirectory}
|
2015-02-04 07:21:39 +00:00
|
|
|
return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Implement the storagedriver.StorageDriver interface.
|
2015-04-23 00:30:01 +00:00
|
|
|
// Name returns the registered name of this driver, i.e. "azure".
func (d *driver) Name() string {
	return driverName
}
|
2014-11-25 18:40:24 +00:00
|
|
|
|
|
|
|
// GetContent retrieves the content stored at "path" as a []byte.
|
2015-04-27 22:58:58 +00:00
|
|
|
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
|
2020-02-21 03:58:17 +00:00
|
|
|
downloadResponse, err := d.client.NewBlobClient(d.blobName(path)).DownloadStream(ctx, nil)
|
2014-11-25 18:40:24 +00:00
|
|
|
if err != nil {
|
|
|
|
if is404(err) {
|
|
|
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
body := downloadResponse.Body
|
|
|
|
defer body.Close()
|
|
|
|
return io.ReadAll(body)
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// PutContent stores the []byte content at a location designated by "path".
|
2015-04-27 22:58:58 +00:00
|
|
|
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
|
2018-05-21 19:05:11 +00:00
|
|
|
// max size for block blobs uploaded via single "Put Blob" for version after "2016-05-31"
|
|
|
|
// https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#remarks
|
|
|
|
const limit = 256 * 1024 * 1024
|
|
|
|
if len(contents) > limit {
|
2016-11-30 20:24:54 +00:00
|
|
|
return fmt.Errorf("uploading %d bytes with PutContent is not supported; limit: %d bytes", len(contents), limit)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Historically, blobs uploaded via PutContent used to be of type AppendBlob
|
2020-08-24 11:18:39 +00:00
|
|
|
// (https://github.com/distribution/distribution/pull/1438). We can't replace
|
2016-11-30 20:24:54 +00:00
|
|
|
// these blobs atomically via a single "Put Blob" operation without
|
|
|
|
// deleting them first. Once we detect they are BlockBlob type, we can
|
|
|
|
// overwrite them with an atomically "Put Blob" operation.
|
|
|
|
//
|
|
|
|
// While we delete the blob and create a new one, there will be a small
|
|
|
|
// window of inconsistency and if the Put Blob fails, we may end up with
|
|
|
|
// losing the existing data while migrating it to BlockBlob type. However,
|
|
|
|
// expectation is the clients pushing will be retrying when they get an error
|
|
|
|
// response.
|
2020-02-21 03:58:17 +00:00
|
|
|
blobName := d.blobName(path)
|
|
|
|
blobRef := d.client.NewBlobClient(blobName)
|
|
|
|
props, err := blobRef.GetProperties(ctx, nil)
|
2016-11-30 20:24:54 +00:00
|
|
|
if err != nil && !is404(err) {
|
|
|
|
return fmt.Errorf("failed to get blob properties: %v", err)
|
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
if err == nil && props.BlobType != nil && *props.BlobType != blob.BlobTypeBlockBlob {
|
|
|
|
if _, err := blobRef.Delete(ctx, nil); err != nil {
|
|
|
|
return fmt.Errorf("failed to delete legacy blob (%v): %v", *props.BlobType, err)
|
2016-11-30 20:24:54 +00:00
|
|
|
}
|
2015-06-11 22:30:18 +00:00
|
|
|
}
|
2016-11-30 20:24:54 +00:00
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
_, err = d.client.NewBlockBlobClient(blobName).UploadBuffer(ctx, contents, nil)
|
|
|
|
return err
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
|
2016-02-08 22:29:21 +00:00
|
|
|
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
|
2014-11-25 18:40:24 +00:00
|
|
|
// given byte offset.
|
2016-02-08 22:29:21 +00:00
|
|
|
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
|
2020-02-21 03:58:17 +00:00
|
|
|
blobRef := d.client.NewBlobClient(d.blobName(path))
|
|
|
|
options := blob.DownloadStreamOptions{
|
|
|
|
Range: blob.HTTPRange{
|
|
|
|
Offset: offset,
|
|
|
|
},
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
props, err := blobRef.GetProperties(ctx, nil)
|
2014-11-25 18:40:24 +00:00
|
|
|
if err != nil {
|
2020-02-21 03:58:17 +00:00
|
|
|
if is404(err) {
|
|
|
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("failed to get blob properties: %v", err)
|
|
|
|
}
|
|
|
|
if props.ContentLength == nil {
|
|
|
|
return nil, fmt.Errorf("failed to get ContentLength for path: %s", path)
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
size := *props.ContentLength
|
2015-01-16 18:18:42 +00:00
|
|
|
if offset >= size {
|
2022-11-02 21:55:22 +00:00
|
|
|
return io.NopCloser(bytes.NewReader(nil)), nil
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
resp, err := blobRef.DownloadStream(ctx, &options)
|
2014-11-25 18:40:24 +00:00
|
|
|
if err != nil {
|
2020-02-21 03:58:17 +00:00
|
|
|
if is404(err) {
|
|
|
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
|
|
}
|
2014-11-25 18:40:24 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
return resp.Body, nil
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
|
2016-02-08 22:29:21 +00:00
|
|
|
// Writer returns a FileWriter which will store the content written to it
|
|
|
|
// at the location designated by "path" after the call to Commit.
|
|
|
|
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
|
2020-02-21 03:58:17 +00:00
|
|
|
blobName := d.blobName(path)
|
|
|
|
blobRef := d.client.NewBlobClient(blobName)
|
|
|
|
|
|
|
|
props, err := blobRef.GetProperties(ctx, nil)
|
|
|
|
blobExists := true
|
2016-02-08 22:29:21 +00:00
|
|
|
if err != nil {
|
2020-02-21 03:58:17 +00:00
|
|
|
if !is404(err) {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
blobExists = false
|
2016-02-08 22:29:21 +00:00
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
|
2016-02-08 22:29:21 +00:00
|
|
|
var size int64
|
|
|
|
if blobExists {
|
|
|
|
if append {
|
2020-02-21 03:58:17 +00:00
|
|
|
if props.ContentLength == nil {
|
|
|
|
return nil, fmt.Errorf("cannot append to blob because no ContentLength property was returned for: %s", blobName)
|
2016-02-08 22:29:21 +00:00
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
size = *props.ContentLength
|
2016-02-08 22:29:21 +00:00
|
|
|
} else {
|
2020-02-21 03:58:17 +00:00
|
|
|
if _, err := blobRef.Delete(ctx, nil); err != nil {
|
2016-02-08 22:29:21 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if append {
|
|
|
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
if _, err = d.client.NewAppendBlobClient(blobName).Create(ctx, nil); err != nil {
|
2016-02-08 22:29:21 +00:00
|
|
|
return nil, err
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
return d.newWriter(ctx, blobName, size), nil
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
|
2015-01-16 18:18:42 +00:00
|
|
|
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
	blobName := d.blobName(path)
	blobRef := d.client.NewBlobClient(blobName)
	// Check if the path is a blob
	props, err := blobRef.GetProperties(ctx, nil)
	if err != nil && !is404(err) {
		return nil, err
	}
	if err == nil {
		// The blob exists. Validate that the SDK returned the optional
		// properties we need before dereferencing their pointers.
		var missing []string
		if props.ContentLength == nil {
			missing = append(missing, "ContentLength")
		}
		if props.LastModified == nil {
			missing = append(missing, "LastModified")
		}

		if len(missing) > 0 {
			return nil, fmt.Errorf("required blob properties %s are missing for blob: %s", missing, blobName)
		}
		return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
			Path:    path,
			Size:    *props.ContentLength,
			ModTime: *props.LastModified,
			IsDir:   false,
		}}, nil
	}

	// Check if path is a virtual container
	virtContainerPath := blobName
	if !strings.HasSuffix(virtContainerPath, "/") {
		virtContainerPath += "/"
	}

	// Finding even a single blob under the prefix is enough to treat the
	// path as a directory, so cap the listing at one result.
	maxResults := int32(1)
	pager := d.client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
		MaxResults: &maxResults,
		Prefix:     &virtContainerPath,
	})
	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		if len(resp.Segment.BlobItems) > 0 {
			// path is a virtual container
			return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
				Path:  path,
				IsDir: true,
			}}, nil
		}
	}

	// path is not a blob or virtual container
	return nil, storagedriver.PathNotFoundError{Path: path}
}
|
|
|
|
|
|
|
|
// List returns a list of the objects that are direct descendants of the given
|
|
|
|
// path.
|
2015-04-27 22:58:58 +00:00
|
|
|
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
|
2014-11-25 18:40:24 +00:00
|
|
|
if path == "/" {
|
|
|
|
path = ""
|
|
|
|
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
blobs, err := d.listBlobs(ctx, path)
|
2014-11-25 18:40:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return blobs, err
|
|
|
|
}
|
|
|
|
|
|
|
|
list := directDescendants(blobs, path)
|
2016-02-08 22:29:21 +00:00
|
|
|
if path != "" && len(list) == 0 {
|
|
|
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
|
|
}
|
2014-11-25 18:40:24 +00:00
|
|
|
return list, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Move moves an object stored at sourcePath to destPath, removing the original
|
|
|
|
// object.
|
2015-04-27 22:58:58 +00:00
|
|
|
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
|
2020-02-21 03:58:17 +00:00
|
|
|
sourceBlobURL, err := d.URLFor(ctx, sourcePath, nil)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
destBlobRef := d.client.NewBlockBlobClient(d.blobName(destPath))
|
|
|
|
_, err = destBlobRef.CopyFromURL(ctx, sourceBlobURL, nil)
|
2014-11-25 18:40:24 +00:00
|
|
|
if err != nil {
|
|
|
|
if is404(err) {
|
|
|
|
return storagedriver.PathNotFoundError{Path: sourcePath}
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
_, err = d.client.NewBlobClient(d.blobName(sourcePath)).Delete(ctx, nil)
|
|
|
|
return err
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
2015-04-27 22:58:58 +00:00
|
|
|
func (d *driver) Delete(ctx context.Context, path string) error {
|
2020-02-21 03:58:17 +00:00
|
|
|
blobRef := d.client.NewBlobClient(d.blobName(path))
|
|
|
|
_, err := blobRef.Delete(ctx, nil)
|
|
|
|
if err == nil {
|
|
|
|
// was a blob and deleted, return
|
|
|
|
return nil
|
|
|
|
} else if !is404(err) {
|
2014-11-25 18:40:24 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Not a blob, see if path is a virtual container with blobs
|
2020-02-21 03:58:17 +00:00
|
|
|
blobs, err := d.listBlobs(ctx, path)
|
2014-11-25 18:40:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, b := range blobs {
|
2020-02-21 03:58:17 +00:00
|
|
|
blobRef := d.client.NewBlobClient(d.blobName(b))
|
|
|
|
if _, err := blobRef.Delete(ctx, nil); err != nil {
|
2014-11-25 18:40:24 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(blobs) == 0 {
|
|
|
|
return storagedriver.PathNotFoundError{Path: path}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-01-16 18:18:42 +00:00
|
|
|
// URLFor returns a publicly accessible URL for the blob stored at given path
|
|
|
|
// for specified duration by making use of Azure Storage Shared Access Signatures (SAS).
|
|
|
|
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info.
|
2015-04-27 22:58:58 +00:00
|
|
|
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
|
2015-01-16 18:18:42 +00:00
|
|
|
expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration
|
|
|
|
expires, ok := options["expiry"]
|
|
|
|
if ok {
|
|
|
|
t, ok := expires.(time.Time)
|
|
|
|
if ok {
|
|
|
|
expiresTime = t
|
|
|
|
}
|
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
blobName := d.blobName(path)
|
|
|
|
blobRef := d.client.NewBlobClient(blobName)
|
|
|
|
return d.azClient.SignBlobURL(ctx, blobRef.URL(), expiresTime)
|
2015-01-16 18:18:42 +00:00
|
|
|
}
|
|
|
|
|
2017-11-29 19:17:39 +00:00
|
|
|
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file and directory.
// It delegates to the shared fallback implementation provided by the
// storagedriver package.
func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error {
	return storagedriver.WalkFallback(ctx, d, path, f)
}
|
|
|
|
|
2014-11-25 18:40:24 +00:00
|
|
|
// directDescendants returns the full paths of the direct children (blobs or
// virtual containers) of prefix found in the blob path list. Elements of
// blobs must be prefixed with a "/".
//
// Example: the direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} are
// {"/foo", "/bar"}; the direct descendants of "bar" are {"/bar/1", "/bar/2"}.
// The order of the returned paths is unspecified.
func directDescendants(blobs []string, prefix string) []string {
	// Normalize the prefix to the form "/.../": leading slash, trailing slash.
	if !strings.HasPrefix(prefix, "/") {
		prefix = "/" + prefix
	}
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	unique := make(map[string]bool)
	for _, blobPath := range blobs {
		if !strings.HasPrefix(blobPath, prefix) {
			continue
		}
		rest := blobPath[len(prefix):]
		if slash := strings.Index(rest, "/"); slash >= 0 {
			// Deeper than one level: record the intermediate "directory".
			unique[prefix+rest[:slash]] = true
		} else {
			// Immediate child blob.
			unique[blobPath] = true
		}
	}

	var descendants []string
	for child := range unique {
		descendants = append(descendants, child)
	}
	return descendants
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
func (d *driver) listBlobs(ctx context.Context, virtPath string) ([]string, error) {
|
2014-11-25 18:40:24 +00:00
|
|
|
if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path
|
|
|
|
virtPath += "/"
|
|
|
|
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
// we will replace the root directory prefix before returning blob names
|
|
|
|
blobPrefix := d.blobName("")
|
|
|
|
|
|
|
|
// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
|
|
|
|
// In those cases, there is no root prefix to replace and we must actually add a "/" to all
|
|
|
|
// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
|
|
|
|
prefix := ""
|
|
|
|
if blobPrefix == "" {
|
|
|
|
prefix = "/"
|
|
|
|
}
|
|
|
|
|
2014-11-25 18:40:24 +00:00
|
|
|
out := []string{}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
listPrefix := d.blobName(virtPath)
|
|
|
|
pager := d.client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
|
|
|
|
Prefix: &listPrefix,
|
|
|
|
})
|
|
|
|
for pager.More() {
|
|
|
|
resp, err := pager.NextPage(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
for _, blob := range resp.Segment.BlobItems {
|
|
|
|
if blob.Name == nil {
|
|
|
|
return nil, fmt.Errorf("required blob property Name is missing while listing blobs under: %s", listPrefix)
|
|
|
|
}
|
|
|
|
name := *blob.Name
|
|
|
|
out = append(out, strings.Replace(name, blobPrefix, prefix, 1))
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
|
2014-11-25 18:40:24 +00:00
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
func (d *driver) blobName(path string) string {
|
|
|
|
return strings.TrimLeft(strings.TrimRight(d.rootDirectory, "/")+path, "/")
|
|
|
|
}
|
|
|
|
|
2014-11-25 18:40:24 +00:00
|
|
|
// is404 reports whether err is an Azure "not found" error code for a blob,
// container, or generic resource.
func is404(err error) bool {
	return bloberror.HasCode(err, bloberror.BlobNotFound, bloberror.ContainerNotFound, bloberror.ResourceNotFound)
}
|
|
|
|
|
|
|
|
// writer is a storagedriver.FileWriter that buffers content through bw and
// appends it to an Azure AppendBlob in maxChunkSize chunks.
type writer struct {
	driver *driver
	// path is the name of the blob being written (already root-prefixed).
	path string
	// size counts bytes handed to Write so far, plus any pre-existing
	// content when the writer was opened in append mode.
	size int64
	// bw buffers writes and flushes them through a blockWriter.
	bw *bufio.Writer
	// closed, committed, and cancelled track the writer lifecycle; once any
	// is set, further writes are rejected.
	closed    bool
	committed bool
	cancelled bool
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
func (d *driver) newWriter(ctx context.Context, path string, size int64) storagedriver.FileWriter {
|
2016-02-08 22:29:21 +00:00
|
|
|
return &writer{
|
|
|
|
driver: d,
|
|
|
|
path: path,
|
|
|
|
size: size,
|
|
|
|
bw: bufio.NewWriterSize(&blockWriter{
|
2020-02-21 03:58:17 +00:00
|
|
|
ctx: ctx,
|
|
|
|
client: d.client,
|
|
|
|
path: path,
|
2016-02-08 22:29:21 +00:00
|
|
|
}, maxChunkSize),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (w *writer) Write(p []byte) (int, error) {
|
|
|
|
if w.closed {
|
|
|
|
return 0, fmt.Errorf("already closed")
|
|
|
|
} else if w.committed {
|
|
|
|
return 0, fmt.Errorf("already committed")
|
|
|
|
} else if w.cancelled {
|
|
|
|
return 0, fmt.Errorf("already cancelled")
|
|
|
|
}
|
|
|
|
|
|
|
|
n, err := w.bw.Write(p)
|
|
|
|
w.size += int64(n)
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Size returns the total number of bytes written through this writer,
// including any pre-existing content when opened in append mode.
func (w *writer) Size() int64 {
	return w.size
}
|
|
|
|
|
|
|
|
// Close flushes any buffered data to the blob and marks the writer closed;
// subsequent operations will fail.
func (w *writer) Close() error {
	if w.closed {
		return fmt.Errorf("already closed")
	}
	w.closed = true
	return w.bw.Flush()
}
|
|
|
|
|
2020-02-21 03:58:17 +00:00
|
|
|
func (w *writer) Cancel(ctx context.Context) error {
|
2016-02-08 22:29:21 +00:00
|
|
|
if w.closed {
|
|
|
|
return fmt.Errorf("already closed")
|
|
|
|
} else if w.committed {
|
|
|
|
return fmt.Errorf("already committed")
|
|
|
|
}
|
|
|
|
w.cancelled = true
|
2020-02-21 03:58:17 +00:00
|
|
|
blobRef := w.driver.client.NewBlobClient(w.path)
|
|
|
|
_, err := blobRef.Delete(ctx, nil)
|
|
|
|
return err
|
2016-02-08 22:29:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (w *writer) Commit() error {
|
|
|
|
if w.closed {
|
|
|
|
return fmt.Errorf("already closed")
|
|
|
|
} else if w.committed {
|
|
|
|
return fmt.Errorf("already committed")
|
|
|
|
} else if w.cancelled {
|
|
|
|
return fmt.Errorf("already cancelled")
|
|
|
|
}
|
|
|
|
w.committed = true
|
|
|
|
return w.bw.Flush()
|
|
|
|
}
|
|
|
|
|
|
|
|
// blockWriter is the io.Writer that writer's bufio.Writer flushes into; each
// Write call issues one AppendBlock request against the append blob at path.
type blockWriter struct {
	// We construct transient blockWriter objects to encapsulate a write
	// and need to keep the context passed in to the original FileWriter.Write.
	// (Storing a context in a struct is normally discouraged, but the
	// io.Writer interface gives no other way to thread it through bufio.)
	ctx    context.Context
	client *container.Client
	path   string
}
|
|
|
|
|
|
|
|
func (bw *blockWriter) Write(p []byte) (int, error) {
|
2020-02-21 03:58:17 +00:00
|
|
|
blobRef := bw.client.NewAppendBlobClient(bw.path)
|
|
|
|
_, err := blobRef.AppendBlock(bw.ctx, streaming.NopCloser(bytes.NewReader(p)), nil)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
2016-02-08 22:29:21 +00:00
|
|
|
}
|
2020-02-21 03:58:17 +00:00
|
|
|
return len(p), nil
|
2014-11-25 18:40:24 +00:00
|
|
|
}
|