forked from TrueCloudLab/distribution
Merge pull request #3982 from milosgajdos/remove-swift-storage-driver
Remove SWIFT storage driver
This commit is contained in:
commit
65b57464f9
31 changed files with 3 additions and 6692 deletions
|
@ -17,7 +17,6 @@ import (
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/redirect"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/redirect"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/oss"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/oss"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/swift"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
|
@ -139,18 +139,6 @@ storage:
|
||||||
multipartcopythresholdsize: 33554432
|
multipartcopythresholdsize: 33554432
|
||||||
rootdirectory: /s3/object/name/prefix
|
rootdirectory: /s3/object/name/prefix
|
||||||
usedualstack: false
|
usedualstack: false
|
||||||
swift:
|
|
||||||
username: username
|
|
||||||
password: password
|
|
||||||
authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
|
|
||||||
tenant: tenantname
|
|
||||||
tenantid: tenantid
|
|
||||||
domain: domain name for Openstack Identity v3 API
|
|
||||||
domainid: domain id for Openstack Identity v3 API
|
|
||||||
insecureskipverify: true
|
|
||||||
region: fr
|
|
||||||
container: containername
|
|
||||||
rootdirectory: /swift/object/name/prefix
|
|
||||||
oss:
|
oss:
|
||||||
accesskeyid: accesskeyid
|
accesskeyid: accesskeyid
|
||||||
accesskeysecret: accesskeysecret
|
accesskeysecret: accesskeysecret
|
||||||
|
@ -447,18 +435,6 @@ storage:
|
||||||
multipartcopymaxconcurrency: 100
|
multipartcopymaxconcurrency: 100
|
||||||
multipartcopythresholdsize: 33554432
|
multipartcopythresholdsize: 33554432
|
||||||
rootdirectory: /s3/object/name/prefix
|
rootdirectory: /s3/object/name/prefix
|
||||||
swift:
|
|
||||||
username: username
|
|
||||||
password: password
|
|
||||||
authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
|
|
||||||
tenant: tenantname
|
|
||||||
tenantid: tenantid
|
|
||||||
domain: domain name for Openstack Identity v3 API
|
|
||||||
domainid: domain id for Openstack Identity v3 API
|
|
||||||
insecureskipverify: true
|
|
||||||
region: fr
|
|
||||||
container: containername
|
|
||||||
rootdirectory: /swift/object/name/prefix
|
|
||||||
oss:
|
oss:
|
||||||
accesskeyid: accesskeyid
|
accesskeyid: accesskeyid
|
||||||
accesskeysecret: accesskeysecret
|
accesskeysecret: accesskeysecret
|
||||||
|
@ -499,7 +475,6 @@ returns an error. You can choose any of these backend storage drivers:
|
||||||
| `azure` | Uses Microsoft Azure Blob Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/azure.md). |
|
| `azure` | Uses Microsoft Azure Blob Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/azure.md). |
|
||||||
| `gcs` | Uses Google Cloud Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/gcs.md). |
|
| `gcs` | Uses Google Cloud Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/gcs.md). |
|
||||||
| `s3` | Uses Amazon Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/s3.md). |
|
| `s3` | Uses Amazon Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/s3.md). |
|
||||||
| `swift` | Uses Openstack Swift object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/swift.md). |
|
|
||||||
| `oss` | Uses Aliyun OSS for object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/oss.md). |
|
| `oss` | Uses Aliyun OSS for object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/oss.md). |
|
||||||
|
|
||||||
For testing only, you can use the [`inmemory` storage
|
For testing only, you can use the [`inmemory` storage
|
||||||
|
|
|
@ -15,7 +15,7 @@ Users interact with a registry by using docker push and pull commands.
|
||||||
|
|
||||||
Storage itself is delegated to drivers. The default storage driver is the local
|
Storage itself is delegated to drivers. The default storage driver is the local
|
||||||
posix filesystem, which is suitable for development or small deployments.
|
posix filesystem, which is suitable for development or small deployments.
|
||||||
Additional cloud-based storage drivers like S3, Microsoft Azure, OpenStack Swift,
|
Additional cloud-based storage drivers like S3, Microsoft Azure, Google Cloud Storage
|
||||||
and Aliyun OSS are also supported. People looking into using other storage
|
and Aliyun OSS are also supported. People looking into using other storage
|
||||||
backends may do so by writing their own driver implementing the
|
backends may do so by writing their own driver implementing the
|
||||||
[Storage API](storage-drivers/index.md).
|
[Storage API](storage-drivers/index.md).
|
||||||
|
|
|
@ -16,9 +16,9 @@ This storage driver package comes bundled with several drivers:
|
||||||
- [filesystem](filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
|
- [filesystem](filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
|
||||||
- [s3](s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
|
- [s3](s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
|
||||||
- [azure](azure.md): A driver storing objects in [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/).
|
- [azure](azure.md): A driver storing objects in [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/).
|
||||||
- [swift](swift.md): A driver storing objects in [Openstack Swift](https://docs.openstack.org/swift/latest/).
|
|
||||||
- [oss](oss.md): A driver storing objects in [Aliyun OSS](https://www.aliyun.com/product/oss).
|
|
||||||
- [gcs](gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket.
|
- [gcs](gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket.
|
||||||
|
- [oss](oss.md): A driver storing objects in [Aliyun OSS](https://www.aliyun.com/product/oss).
|
||||||
|
- swift: *NO LONGER SUPPORTED*
|
||||||
|
|
||||||
## Storage driver API
|
## Storage driver API
|
||||||
|
|
||||||
|
|
|
@ -1,42 +0,0 @@
|
||||||
---
|
|
||||||
description: Explains how to use the OpenStack swift storage driver
|
|
||||||
keywords: registry, service, driver, images, storage, swift
|
|
||||||
title: OpenStack Swift storage driver
|
|
||||||
---
|
|
||||||
|
|
||||||
An implementation of the `storagedriver.StorageDriver` interface that uses
|
|
||||||
[OpenStack Swift](http://docs.openstack.org/developer/swift/) for object
|
|
||||||
storage.
|
|
||||||
|
|
||||||
## Parameters
|
|
||||||
|
|
||||||
| Parameter | Required | Description |
|
|
||||||
|:--------------|:---------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `authurl` | yes | URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth |
|
|
||||||
| `username` | yes | Your Openstack user name. |
|
|
||||||
| `password` | yes | Your Openstack password. |
|
|
||||||
| `region` | no | The Openstack region in which your container exists. |
|
|
||||||
| `container` | yes | The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization. |
|
|
||||||
| `tenant` | no | Your Openstack tenant name. You can either use `tenant` or `tenantid`. |
|
|
||||||
| `tenantid` | no | Your Openstack tenant name. You can either use `tenant` or `tenantid`. |
|
|
||||||
| `domain` | no | Your Openstack domain name for Identity v3 API. You can either use `domain` or `domainid`. |
|
|
||||||
| `domainid` | no | Your Openstack domain name for Identity v3 API. You can either use `domain` or `domainid`. |
|
|
||||||
| `tenantdomain` | no | Your tenant's Openstack domain name for Identity v3 API. Only necessary if different from the <code>domain</code>. You can either use `tenantdomain` or `tenantdomainid`. |
|
|
||||||
| `tenantdomainid` | no | Your tenant's Openstack domain id for Identity v3 API. Only necessary if different from the <code>domain</code>. You can either use `tenantdomain` or `tenantdomainid`. |
|
|
||||||
| `trustid` | no | Your Openstack trust ID for Identity v3 API. |
|
|
||||||
| `insecureskipverify` | no | Skips TLS verification if the value is wet to `true`. The default is `false`. |
|
|
||||||
| `chunksize` | no | Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). |
|
|
||||||
| `prefix` | no | This is a prefix that is applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string which is the container's root. |
|
|
||||||
| `secretkey` | no | The secret key used to generate temporary URLs. |
|
|
||||||
| `accesskey` | no | The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter. |
|
|
||||||
| `authversion` | no | Specify the OpenStack Auth's version, for example `3`. By default the driver autodetects the auth's version from the AuthURL. |
|
|
||||||
| `endpointtype` | no | The endpoint type used when connecting to swift. Possible values are `public`, `internal`, and `admin`. The default is `public`. |
|
|
||||||
|
|
||||||
The features supported by the Swift server are queried by requesting the `/info`
|
|
||||||
URL on the server. In case the administrator disabled that feature, the
|
|
||||||
configuration file can specify the following optional parameters :
|
|
||||||
|
|
||||||
| Optional parameter | Description |
|
|
||||||
|:--------------|:---------|
|
|
||||||
| `tempurlcontainerkey` | Specify whether to use container secret key to generate temporary URL when set to true, or the account secret key otherwise. |
|
|
||||||
| `tempurlmethods` | Array of HTTP methods that are supported by the TempURL middleware of the Swift server. For example: `["GET", "PUT", "HEAD", "POST", "DELETE"]` |
|
|
1
go.mod
1
go.mod
|
@ -22,7 +22,6 @@ require (
|
||||||
github.com/hashicorp/golang-lru v0.5.4
|
github.com/hashicorp/golang-lru v0.5.4
|
||||||
github.com/klauspost/compress v1.16.5
|
github.com/klauspost/compress v1.16.5
|
||||||
github.com/mitchellh/mapstructure v1.1.2
|
github.com/mitchellh/mapstructure v1.1.2
|
||||||
github.com/ncw/swift v1.0.47
|
|
||||||
github.com/opencontainers/go-digest v1.0.0
|
github.com/opencontainers/go-digest v1.0.0
|
||||||
github.com/opencontainers/image-spec v1.0.2
|
github.com/opencontainers/image-spec v1.0.2
|
||||||
github.com/prometheus/client_golang v1.12.1 // indirect; updated to latest
|
github.com/prometheus/client_golang v1.12.1 // indirect; updated to latest
|
||||||
|
|
2
go.sum
2
go.sum
|
@ -244,8 +244,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
|
||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
|
|
||||||
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
|
||||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||||
|
|
|
@ -1,932 +0,0 @@
|
||||||
// Package swift provides a storagedriver.StorageDriver implementation to
|
|
||||||
// store blobs in Openstack Swift object storage.
|
|
||||||
//
|
|
||||||
// This package leverages the ncw/swift client library for interfacing with
|
|
||||||
// Swift.
|
|
||||||
//
|
|
||||||
// It supports both TempAuth authentication and Keystone authentication
|
|
||||||
// (up to version 3).
|
|
||||||
//
|
|
||||||
// As Swift has a limit on the size of a single uploaded object (by default
|
|
||||||
// this is 5GB), the driver makes use of the Swift Large Object Support
|
|
||||||
// (http://docs.openstack.org/developer/swift/overview_large_objects.html).
|
|
||||||
// Only one container is used for both manifests and data objects. Manifests
|
|
||||||
// are stored in the 'files' pseudo directory, data objects are stored under
|
|
||||||
// 'segments'.
|
|
||||||
package swift
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha1"
|
|
||||||
"crypto/tls"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/mitchellh/mapstructure"
|
|
||||||
"github.com/ncw/swift"
|
|
||||||
|
|
||||||
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
|
|
||||||
"github.com/distribution/distribution/v3/registry/storage/driver/base"
|
|
||||||
"github.com/distribution/distribution/v3/registry/storage/driver/factory"
|
|
||||||
"github.com/distribution/distribution/v3/version"
|
|
||||||
)
|
|
||||||
|
|
||||||
const driverName = "swift"
|
|
||||||
|
|
||||||
// defaultChunkSize defines the default size of a segment
|
|
||||||
const defaultChunkSize = 20 * 1024 * 1024
|
|
||||||
|
|
||||||
// minChunkSize defines the minimum size of a segment
|
|
||||||
const minChunkSize = 1 << 20
|
|
||||||
|
|
||||||
// contentType defines the Content-Type header associated with stored segments
|
|
||||||
const contentType = "application/octet-stream"
|
|
||||||
|
|
||||||
// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded
|
|
||||||
var readAfterWriteTimeout = 15 * time.Second
|
|
||||||
|
|
||||||
// readAfterWriteWait defines the time to sleep between two retries
|
|
||||||
var readAfterWriteWait = 200 * time.Millisecond
|
|
||||||
|
|
||||||
// Parameters A struct that encapsulates all of the driver parameters after all values have been set
|
|
||||||
type Parameters struct {
|
|
||||||
Username string
|
|
||||||
Password string
|
|
||||||
AuthURL string
|
|
||||||
Tenant string
|
|
||||||
TenantID string
|
|
||||||
Domain string
|
|
||||||
DomainID string
|
|
||||||
TenantDomain string
|
|
||||||
TenantDomainID string
|
|
||||||
TrustID string
|
|
||||||
Region string
|
|
||||||
AuthVersion int
|
|
||||||
Container string
|
|
||||||
Prefix string
|
|
||||||
EndpointType string
|
|
||||||
InsecureSkipVerify bool
|
|
||||||
ChunkSize int
|
|
||||||
SecretKey string
|
|
||||||
AccessKey string
|
|
||||||
TempURLContainerKey bool
|
|
||||||
TempURLMethods []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// swiftInfo maps the JSON structure returned by Swift /info endpoint
|
|
||||||
type swiftInfo struct {
|
|
||||||
Swift struct {
|
|
||||||
Version string `mapstructure:"version"`
|
|
||||||
}
|
|
||||||
Tempurl struct {
|
|
||||||
Methods []string `mapstructure:"methods"`
|
|
||||||
}
|
|
||||||
BulkDelete struct {
|
|
||||||
MaxDeletesPerRequest int `mapstructure:"max_deletes_per_request"`
|
|
||||||
} `mapstructure:"bulk_delete"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
factory.Register(driverName, &swiftDriverFactory{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// swiftDriverFactory implements the factory.StorageDriverFactory interface
|
|
||||||
type swiftDriverFactory struct{}
|
|
||||||
|
|
||||||
func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
|
|
||||||
return FromParameters(parameters)
|
|
||||||
}
|
|
||||||
|
|
||||||
type driver struct {
|
|
||||||
Conn *swift.Connection
|
|
||||||
Container string
|
|
||||||
Prefix string
|
|
||||||
BulkDeleteSupport bool
|
|
||||||
BulkDeleteMaxDeletes int
|
|
||||||
ChunkSize int
|
|
||||||
SecretKey string
|
|
||||||
AccessKey string
|
|
||||||
TempURLContainerKey bool
|
|
||||||
TempURLMethods []string
|
|
||||||
}
|
|
||||||
|
|
||||||
type baseEmbed struct {
|
|
||||||
base.Base
|
|
||||||
}
|
|
||||||
|
|
||||||
// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift
|
|
||||||
// Objects are stored at absolute keys in the provided container.
|
|
||||||
type Driver struct {
|
|
||||||
baseEmbed
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromParameters constructs a new Driver with a given parameters map
|
|
||||||
// Required parameters:
|
|
||||||
// - username
|
|
||||||
// - password
|
|
||||||
// - authurl
|
|
||||||
// - container
|
|
||||||
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|
||||||
params := Parameters{
|
|
||||||
ChunkSize: defaultChunkSize,
|
|
||||||
InsecureSkipVerify: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitize some entries before trying to decode parameters with mapstructure
|
|
||||||
// TenantID and Tenant when integers only and passed as ENV variables
|
|
||||||
// are considered as integer and not string. The parser fails in this
|
|
||||||
// case.
|
|
||||||
_, ok := parameters["tenant"]
|
|
||||||
if ok {
|
|
||||||
parameters["tenant"] = fmt.Sprint(parameters["tenant"])
|
|
||||||
}
|
|
||||||
_, ok = parameters["tenantid"]
|
|
||||||
if ok {
|
|
||||||
parameters["tenantid"] = fmt.Sprint(parameters["tenantid"])
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := mapstructure.Decode(parameters, ¶ms); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if params.Username == "" {
|
|
||||||
return nil, fmt.Errorf("no username parameter provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if params.Password == "" {
|
|
||||||
return nil, fmt.Errorf("no password parameter provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if params.AuthURL == "" {
|
|
||||||
return nil, fmt.Errorf("no authurl parameter provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if params.Container == "" {
|
|
||||||
return nil, fmt.Errorf("no container parameter provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if params.ChunkSize < minChunkSize {
|
|
||||||
return nil, fmt.Errorf("the chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
return New(params)
|
|
||||||
}
|
|
||||||
|
|
||||||
// New constructs a new Driver with the given Openstack Swift credentials and container name
|
|
||||||
func New(params Parameters) (*Driver, error) {
|
|
||||||
transport := &http.Transport{
|
|
||||||
Proxy: http.ProxyFromEnvironment,
|
|
||||||
MaxIdleConnsPerHost: 2048,
|
|
||||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify},
|
|
||||||
}
|
|
||||||
|
|
||||||
ct := &swift.Connection{
|
|
||||||
UserName: params.Username,
|
|
||||||
ApiKey: params.Password,
|
|
||||||
AuthUrl: params.AuthURL,
|
|
||||||
Region: params.Region,
|
|
||||||
AuthVersion: params.AuthVersion,
|
|
||||||
UserAgent: "distribution/" + version.Version,
|
|
||||||
Tenant: params.Tenant,
|
|
||||||
TenantId: params.TenantID,
|
|
||||||
Domain: params.Domain,
|
|
||||||
DomainId: params.DomainID,
|
|
||||||
TenantDomain: params.TenantDomain,
|
|
||||||
TenantDomainId: params.TenantDomainID,
|
|
||||||
TrustId: params.TrustID,
|
|
||||||
EndpointType: swift.EndpointType(params.EndpointType),
|
|
||||||
Transport: transport,
|
|
||||||
ConnectTimeout: 60 * time.Second,
|
|
||||||
Timeout: 15 * 60 * time.Second,
|
|
||||||
}
|
|
||||||
err := ct.Authenticate()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("swift authentication failed: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound {
|
|
||||||
if err := ct.ContainerCreate(params.Container, nil); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create container %s (%s)", params.Container, err)
|
|
||||||
}
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to retrieve info about container %s (%s)", params.Container, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
d := &driver{
|
|
||||||
Conn: ct,
|
|
||||||
Container: params.Container,
|
|
||||||
Prefix: params.Prefix,
|
|
||||||
ChunkSize: params.ChunkSize,
|
|
||||||
TempURLMethods: make([]string, 0),
|
|
||||||
AccessKey: params.AccessKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
info := swiftInfo{}
|
|
||||||
if config, err := d.Conn.QueryInfo(); err == nil {
|
|
||||||
_, d.BulkDeleteSupport = config["bulk_delete"]
|
|
||||||
|
|
||||||
if err := mapstructure.Decode(config, &info); err == nil {
|
|
||||||
d.TempURLContainerKey = info.Swift.Version >= "2.3.0"
|
|
||||||
d.TempURLMethods = info.Tempurl.Methods
|
|
||||||
if d.BulkDeleteSupport {
|
|
||||||
d.BulkDeleteMaxDeletes = info.BulkDelete.MaxDeletesPerRequest
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
d.TempURLContainerKey = params.TempURLContainerKey
|
|
||||||
d.TempURLMethods = params.TempURLMethods
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(d.TempURLMethods) > 0 {
|
|
||||||
secretKey := params.SecretKey
|
|
||||||
if secretKey == "" {
|
|
||||||
secretKey, _ = generateSecret()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Since Swift 2.2.2, we can now set secret keys on containers
|
|
||||||
// in addition to the account secret keys. Use them in preference.
|
|
||||||
if d.TempURLContainerKey {
|
|
||||||
_, containerHeaders, err := d.Conn.Container(d.Container)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to fetch container info %s (%s)", d.Container, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"]
|
|
||||||
if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) {
|
|
||||||
m := swift.Metadata{}
|
|
||||||
m["temp-url-key"] = secretKey
|
|
||||||
if d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil {
|
|
||||||
d.SecretKey = secretKey
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Use the account secret key
|
|
||||||
_, accountHeaders, err := d.Conn.Account()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to fetch account info (%s)", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"]
|
|
||||||
if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) {
|
|
||||||
m := swift.Metadata{}
|
|
||||||
m["temp-url-key"] = secretKey
|
|
||||||
if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil {
|
|
||||||
d.SecretKey = secretKey
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Driver{
|
|
||||||
baseEmbed: baseEmbed{
|
|
||||||
Base: base.Base{
|
|
||||||
StorageDriver: d,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Implement the storagedriver.StorageDriver interface
|
|
||||||
|
|
||||||
func (d *driver) Name() string {
|
|
||||||
return driverName
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetContent retrieves the content stored at "path" as a []byte.
|
|
||||||
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
|
|
||||||
content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path))
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return content, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutContent stores the []byte content at a location designated by "path".
|
|
||||||
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
|
|
||||||
err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, contentType)
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
|
|
||||||
// given byte offset.
|
|
||||||
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
|
|
||||||
headers := make(swift.Headers)
|
|
||||||
headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-"
|
|
||||||
|
|
||||||
waitingTime := readAfterWriteWait
|
|
||||||
endTime := time.Now().Add(readAfterWriteTimeout)
|
|
||||||
|
|
||||||
for {
|
|
||||||
file, headers, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers)
|
|
||||||
if err != nil {
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable {
|
|
||||||
return io.NopCloser(bytes.NewReader(nil)), nil
|
|
||||||
}
|
|
||||||
return file, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this is a DLO and it is clear that segments are still missing,
|
|
||||||
// wait until they show up
|
|
||||||
_, isDLO := headers["X-Object-Manifest"]
|
|
||||||
size, err := file.Length()
|
|
||||||
if err != nil {
|
|
||||||
return file, err
|
|
||||||
}
|
|
||||||
if isDLO && size == 0 {
|
|
||||||
if time.Now().Add(waitingTime).After(endTime) {
|
|
||||||
return nil, fmt.Errorf("timeout expired while waiting for segments of %s to show up", path)
|
|
||||||
}
|
|
||||||
time.Sleep(waitingTime)
|
|
||||||
waitingTime *= 2
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// if not, then this reader will be fine
|
|
||||||
return file, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writer returns a FileWriter which will store the content written to it
|
|
||||||
// at the location designated by "path" after the call to Commit.
|
|
||||||
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
|
|
||||||
var (
|
|
||||||
segments []swift.Object
|
|
||||||
segmentsPath string
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
if !append {
|
|
||||||
segmentsPath, err = d.swiftSegmentPath(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path))
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
manifest, ok := headers["X-Object-Manifest"]
|
|
||||||
if !ok {
|
|
||||||
segmentsPath, err = d.swiftSegmentPath(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegmentPath(segmentsPath, len(segments))); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
segments = []swift.Object{info}
|
|
||||||
} else {
|
|
||||||
_, segmentsPath = parseManifest(manifest)
|
|
||||||
if segments, err = d.getAllSegments(segmentsPath); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return d.newWriter(path, segmentsPath, segments), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stat retrieves the FileInfo for the given path, including the current size
|
|
||||||
// in bytes and the creation time.
|
|
||||||
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
|
|
||||||
swiftPath := d.swiftPath(path)
|
|
||||||
opts := &swift.ObjectsOpts{
|
|
||||||
Prefix: swiftPath,
|
|
||||||
Delimiter: '/',
|
|
||||||
}
|
|
||||||
|
|
||||||
objects, err := d.Conn.ObjectsAll(d.Container, opts)
|
|
||||||
if err != nil {
|
|
||||||
if err == swift.ContainerNotFound {
|
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fi := storagedriver.FileInfoFields{
|
|
||||||
Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, obj := range objects {
|
|
||||||
if obj.PseudoDirectory && obj.Name == swiftPath+"/" {
|
|
||||||
fi.IsDir = true
|
|
||||||
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
|
|
||||||
} else if obj.Name == swiftPath {
|
|
||||||
// The file exists. But on Swift 1.12, the 'bytes' field is always 0 so
|
|
||||||
// we need to do a separate HEAD request.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't trust an empty `objects` slice. A container listing can be
|
|
||||||
// outdated. For files, we can make a HEAD request on the object which
|
|
||||||
// reports existence (at least) much more reliably.
|
|
||||||
waitingTime := readAfterWriteWait
|
|
||||||
endTime := time.Now().Add(readAfterWriteTimeout)
|
|
||||||
|
|
||||||
for {
|
|
||||||
info, headers, err := d.Conn.Object(d.Container, swiftPath)
|
|
||||||
if err != nil {
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this is a DLO and it is clear that segments are still missing,
|
|
||||||
// wait until they show up
|
|
||||||
_, isDLO := headers["X-Object-Manifest"]
|
|
||||||
if isDLO && info.Bytes == 0 {
|
|
||||||
if time.Now().Add(waitingTime).After(endTime) {
|
|
||||||
return nil, fmt.Errorf("timeout expired while waiting for segments of %s to show up", path)
|
|
||||||
}
|
|
||||||
time.Sleep(waitingTime)
|
|
||||||
waitingTime *= 2
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// otherwise, accept the result
|
|
||||||
fi.IsDir = false
|
|
||||||
fi.Size = info.Bytes
|
|
||||||
fi.ModTime = info.LastModified
|
|
||||||
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// List returns a list of the objects that are direct descendants of the given path.
|
|
||||||
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
|
|
||||||
var files []string
|
|
||||||
|
|
||||||
prefix := d.swiftPath(path)
|
|
||||||
if prefix != "" {
|
|
||||||
prefix += "/"
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := &swift.ObjectsOpts{
|
|
||||||
Prefix: prefix,
|
|
||||||
Delimiter: '/',
|
|
||||||
}
|
|
||||||
|
|
||||||
objects, err := d.Conn.ObjectsAll(d.Container, opts)
|
|
||||||
for _, obj := range objects {
|
|
||||||
files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/")))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") {
|
|
||||||
return files, storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return files, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move moves an object stored at sourcePath to destPath, removing the original
|
|
||||||
// object.
|
|
||||||
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
|
|
||||||
_, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath))
|
|
||||||
if err == nil {
|
|
||||||
if manifest, ok := headers["X-Object-Manifest"]; ok {
|
|
||||||
if err = d.createManifest(destPath, manifest); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath))
|
|
||||||
} else {
|
|
||||||
err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: sourcePath}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
|
||||||
func (d *driver) Delete(ctx context.Context, path string) error {
|
|
||||||
opts := swift.ObjectsOpts{
|
|
||||||
Prefix: d.swiftPath(path) + "/",
|
|
||||||
}
|
|
||||||
|
|
||||||
objects, err := d.Conn.ObjectsAll(d.Container, &opts)
|
|
||||||
if err != nil {
|
|
||||||
if err == swift.ContainerNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, obj := range objects {
|
|
||||||
if obj.PseudoDirectory {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil {
|
|
||||||
manifest, ok := headers["X-Object-Manifest"]
|
|
||||||
if ok {
|
|
||||||
_, prefix := parseManifest(manifest)
|
|
||||||
segments, err := d.getAllSegments(prefix)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
objects = append(objects, segments...)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: obj.Name}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.BulkDeleteSupport && len(objects) > 0 && d.BulkDeleteMaxDeletes > 0 {
|
|
||||||
filenames := make([]string, len(objects))
|
|
||||||
for i, obj := range objects {
|
|
||||||
filenames[i] = obj.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
chunks, err := chunkFilenames(filenames, d.BulkDeleteMaxDeletes)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, chunk := range chunks {
|
|
||||||
_, err := d.Conn.BulkDelete(d.Container, chunk)
|
|
||||||
// Don't fail on ObjectNotFound because eventual consistency
|
|
||||||
// makes this situation normal.
|
|
||||||
if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound {
|
|
||||||
if err == swift.ContainerNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for _, obj := range objects {
|
|
||||||
if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil {
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: obj.Name}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, err = d.Conn.Object(d.Container, d.swiftPath(path))
|
|
||||||
if err == nil {
|
|
||||||
if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil {
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else if err == swift.ObjectNotFound {
|
|
||||||
if len(objects) == 0 {
|
|
||||||
return storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// URLFor returns a URL which may be used to retrieve the content stored at the given path.
|
|
||||||
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
|
|
||||||
if d.SecretKey == "" {
|
|
||||||
return "", storagedriver.ErrUnsupportedMethod{}
|
|
||||||
}
|
|
||||||
|
|
||||||
methodString := http.MethodGet
|
|
||||||
method, ok := options["method"]
|
|
||||||
if ok {
|
|
||||||
if methodString, ok = method.(string); !ok {
|
|
||||||
return "", storagedriver.ErrUnsupportedMethod{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if methodString == http.MethodHead {
|
|
||||||
// A "HEAD" request on a temporary URL is allowed if the
|
|
||||||
// signature was generated with "GET", "POST" or "PUT"
|
|
||||||
methodString = http.MethodGet
|
|
||||||
}
|
|
||||||
|
|
||||||
supported := false
|
|
||||||
for _, method := range d.TempURLMethods {
|
|
||||||
if method == methodString {
|
|
||||||
supported = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !supported {
|
|
||||||
return "", storagedriver.ErrUnsupportedMethod{}
|
|
||||||
}
|
|
||||||
|
|
||||||
expiresTime := time.Now().Add(20 * time.Minute)
|
|
||||||
expires, ok := options["expiry"]
|
|
||||||
if ok {
|
|
||||||
et, ok := expires.(time.Time)
|
|
||||||
if ok {
|
|
||||||
expiresTime = et
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime)
|
|
||||||
|
|
||||||
if d.AccessKey != "" {
|
|
||||||
// On HP Cloud, the signature must be in the form of tenant_id:access_key:signature
|
|
||||||
url, _ := url.Parse(tempURL)
|
|
||||||
query := url.Query()
|
|
||||||
query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig")))
|
|
||||||
url.RawQuery = query.Encode()
|
|
||||||
tempURL = url.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return tempURL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk traverses a filesystem defined within driver, starting
|
|
||||||
// from the given path, calling f on each file and directory
|
|
||||||
func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error {
|
|
||||||
return storagedriver.WalkFallback(ctx, d, path, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *driver) swiftPath(path string) string {
|
|
||||||
return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/")
|
|
||||||
}
|
|
||||||
|
|
||||||
// swiftSegmentPath returns a randomly generated path in the segments directory.
|
|
||||||
func (d *driver) swiftSegmentPath(path string) (string, error) {
|
|
||||||
checksum := sha1.New()
|
|
||||||
checksum.Write([]byte(path))
|
|
||||||
|
|
||||||
if _, err := io.CopyN(checksum, rand.Reader, 32); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
path = hex.EncodeToString(checksum.Sum(nil))
|
|
||||||
return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *driver) getAllSegments(path string) ([]swift.Object, error) {
|
|
||||||
// a simple container listing works 99.9% of the time
|
|
||||||
segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path})
|
|
||||||
if err != nil {
|
|
||||||
if err == swift.ContainerNotFound {
|
|
||||||
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// build a lookup table by object name
|
|
||||||
hasObjectName := make(map[string]struct{})
|
|
||||||
for _, segment := range segments {
|
|
||||||
hasObjectName[segment.Name] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The container listing might be outdated (i.e. not contain all existing
|
|
||||||
// segment objects yet) because of temporary inconsistency (Swift is only
|
|
||||||
// eventually consistent!). Check its completeness.
|
|
||||||
segmentNumber := 0
|
|
||||||
for {
|
|
||||||
segmentNumber++
|
|
||||||
segmentPath := getSegmentPath(path, segmentNumber)
|
|
||||||
|
|
||||||
if _, seen := hasObjectName[segmentPath]; seen {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This segment is missing in the container listing. Use a more reliable
|
|
||||||
// request to check its existence. (HEAD requests on segments are
|
|
||||||
// guaranteed to return the correct metadata, except for the pathological
|
|
||||||
// case of an outage of large parts of the Swift cluster or its network,
|
|
||||||
// since every segment is only written once.)
|
|
||||||
segment, _, err := d.Conn.Object(d.Container, segmentPath)
|
|
||||||
switch err {
|
|
||||||
case nil:
|
|
||||||
// found new segment -> keep going, more might be missing
|
|
||||||
segments = append(segments, segment)
|
|
||||||
continue
|
|
||||||
case swift.ObjectNotFound:
|
|
||||||
// This segment is missing. Since we upload segments sequentially,
|
|
||||||
// there won't be any more segments after it.
|
|
||||||
return segments, nil
|
|
||||||
default:
|
|
||||||
return nil, err // unexpected error
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *driver) createManifest(path string, segments string) error {
|
|
||||||
headers := make(swift.Headers)
|
|
||||||
headers["X-Object-Manifest"] = segments
|
|
||||||
manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", contentType, headers)
|
|
||||||
if err != nil {
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := manifest.Close(); err != nil {
|
|
||||||
if err == swift.ObjectNotFound {
|
|
||||||
return storagedriver.PathNotFoundError{Path: path}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func chunkFilenames(slice []string, maxSize int) (chunks [][]string, err error) {
|
|
||||||
if maxSize > 0 {
|
|
||||||
for offset := 0; offset < len(slice); offset += maxSize {
|
|
||||||
chunkSize := maxSize
|
|
||||||
if offset+chunkSize > len(slice) {
|
|
||||||
chunkSize = len(slice) - offset
|
|
||||||
}
|
|
||||||
chunks = append(chunks, slice[offset:offset+chunkSize])
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("max chunk size must be > 0")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseManifest(manifest string) (container string, prefix string) {
|
|
||||||
container, prefix, _ = strings.Cut(manifest, "/")
|
|
||||||
return container, prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateSecret() (string, error) {
|
|
||||||
var secretBytes [32]byte
|
|
||||||
if _, err := rand.Read(secretBytes[:]); err != nil {
|
|
||||||
return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err)
|
|
||||||
}
|
|
||||||
return hex.EncodeToString(secretBytes[:]), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getSegmentPath(segmentsPath string, partNumber int) string {
|
|
||||||
return fmt.Sprintf("%s/%016d", segmentsPath, partNumber)
|
|
||||||
}
|
|
||||||
|
|
||||||
type writer struct {
|
|
||||||
driver *driver
|
|
||||||
path string
|
|
||||||
segmentsPath string
|
|
||||||
size int64
|
|
||||||
bw *bufio.Writer
|
|
||||||
closed bool
|
|
||||||
committed bool
|
|
||||||
cancelled bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *driver) newWriter(path, segmentsPath string, segments []swift.Object) storagedriver.FileWriter {
|
|
||||||
var size int64
|
|
||||||
for _, segment := range segments {
|
|
||||||
size += segment.Bytes
|
|
||||||
}
|
|
||||||
return &writer{
|
|
||||||
driver: d,
|
|
||||||
path: path,
|
|
||||||
segmentsPath: segmentsPath,
|
|
||||||
size: size,
|
|
||||||
bw: bufio.NewWriterSize(&segmentWriter{
|
|
||||||
conn: d.Conn,
|
|
||||||
container: d.Container,
|
|
||||||
segmentsPath: segmentsPath,
|
|
||||||
segmentNumber: len(segments) + 1,
|
|
||||||
maxChunkSize: d.ChunkSize,
|
|
||||||
}, d.ChunkSize),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) Write(p []byte) (int, error) {
|
|
||||||
if w.closed {
|
|
||||||
return 0, fmt.Errorf("already closed")
|
|
||||||
} else if w.committed {
|
|
||||||
return 0, fmt.Errorf("already committed")
|
|
||||||
} else if w.cancelled {
|
|
||||||
return 0, fmt.Errorf("already cancelled")
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := w.bw.Write(p)
|
|
||||||
w.size += int64(n)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) Size() int64 {
|
|
||||||
return w.size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) Close() error {
|
|
||||||
if w.closed {
|
|
||||||
return fmt.Errorf("already closed")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.bw.Flush(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !w.committed && !w.cancelled {
|
|
||||||
if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.waitForSegmentsToShowUp(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.closed = true
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) Cancel(ctx context.Context) error {
|
|
||||||
if w.closed {
|
|
||||||
return fmt.Errorf("already closed")
|
|
||||||
} else if w.committed {
|
|
||||||
return fmt.Errorf("already committed")
|
|
||||||
}
|
|
||||||
w.cancelled = true
|
|
||||||
return w.driver.Delete(ctx, w.path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) Commit() error {
|
|
||||||
if w.closed {
|
|
||||||
return fmt.Errorf("already closed")
|
|
||||||
} else if w.committed {
|
|
||||||
return fmt.Errorf("already committed")
|
|
||||||
} else if w.cancelled {
|
|
||||||
return fmt.Errorf("already cancelled")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.bw.Flush(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
w.committed = true
|
|
||||||
return w.waitForSegmentsToShowUp()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) waitForSegmentsToShowUp() error {
|
|
||||||
var err error
|
|
||||||
waitingTime := readAfterWriteWait
|
|
||||||
endTime := time.Now().Add(readAfterWriteTimeout)
|
|
||||||
|
|
||||||
for {
|
|
||||||
var info swift.Object
|
|
||||||
if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil {
|
|
||||||
if info.Bytes == w.size {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
err = fmt.Errorf("timeout expired while waiting for segments of %s to show up", w.path)
|
|
||||||
}
|
|
||||||
if time.Now().Add(waitingTime).After(endTime) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
time.Sleep(waitingTime)
|
|
||||||
waitingTime *= 2
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type segmentWriter struct {
|
|
||||||
conn *swift.Connection
|
|
||||||
container string
|
|
||||||
segmentsPath string
|
|
||||||
segmentNumber int
|
|
||||||
maxChunkSize int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sw *segmentWriter) Write(p []byte) (int, error) {
|
|
||||||
n := 0
|
|
||||||
for offset := 0; offset < len(p); offset += sw.maxChunkSize {
|
|
||||||
chunkSize := sw.maxChunkSize
|
|
||||||
if offset+chunkSize > len(p) {
|
|
||||||
chunkSize = len(p) - offset
|
|
||||||
}
|
|
||||||
_, err := sw.conn.ObjectPut(sw.container, getSegmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil)
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sw.segmentNumber++
|
|
||||||
n += chunkSize
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, nil
|
|
||||||
}
|
|
|
@ -1,257 +0,0 @@
|
||||||
package swift
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/ncw/swift/swifttest"
|
|
||||||
|
|
||||||
"github.com/distribution/distribution/v3/context"
|
|
||||||
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
|
|
||||||
"github.com/distribution/distribution/v3/registry/storage/driver/testsuites"
|
|
||||||
|
|
||||||
"gopkg.in/check.v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Hook up gocheck into the "go test" runner.
|
|
||||||
func Test(t *testing.T) { check.TestingT(t) }
|
|
||||||
|
|
||||||
var swiftDriverConstructor func(prefix string) (*Driver, error)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
var (
|
|
||||||
username = os.Getenv("SWIFT_USERNAME")
|
|
||||||
password = os.Getenv("SWIFT_PASSWORD")
|
|
||||||
authURL = os.Getenv("SWIFT_AUTH_URL")
|
|
||||||
tenant = os.Getenv("SWIFT_TENANT_NAME")
|
|
||||||
tenantID = os.Getenv("SWIFT_TENANT_ID")
|
|
||||||
domain = os.Getenv("SWIFT_DOMAIN_NAME")
|
|
||||||
domainID = os.Getenv("SWIFT_DOMAIN_ID")
|
|
||||||
tenantDomain = os.Getenv("SWIFT_DOMAIN_NAME")
|
|
||||||
tenantDomainID = os.Getenv("SWIFT_DOMAIN_ID")
|
|
||||||
trustID = os.Getenv("SWIFT_TRUST_ID")
|
|
||||||
container = os.Getenv("SWIFT_CONTAINER_NAME")
|
|
||||||
region = os.Getenv("SWIFT_REGION_NAME")
|
|
||||||
AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION"))
|
|
||||||
endpointType = os.Getenv("SWIFT_ENDPOINT_TYPE")
|
|
||||||
insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY"))
|
|
||||||
secretKey = os.Getenv("SWIFT_SECRET_KEY")
|
|
||||||
accessKey = os.Getenv("SWIFT_ACCESS_KEY")
|
|
||||||
containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY"))
|
|
||||||
tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",")
|
|
||||||
|
|
||||||
swiftServer *swifttest.SwiftServer
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
if username == "" || password == "" || authURL == "" || container == "" {
|
|
||||||
if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
username = "swifttest"
|
|
||||||
password = "swifttest"
|
|
||||||
authURL = swiftServer.AuthURL
|
|
||||||
container = "test"
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix, err := os.MkdirTemp("", "driver-")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
defer os.Remove(prefix)
|
|
||||||
|
|
||||||
swiftDriverConstructor = func(root string) (*Driver, error) {
|
|
||||||
parameters := Parameters{
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
authURL,
|
|
||||||
tenant,
|
|
||||||
tenantID,
|
|
||||||
domain,
|
|
||||||
domainID,
|
|
||||||
tenantDomain,
|
|
||||||
tenantDomainID,
|
|
||||||
trustID,
|
|
||||||
region,
|
|
||||||
AuthVersion,
|
|
||||||
container,
|
|
||||||
root,
|
|
||||||
endpointType,
|
|
||||||
insecureSkipVerify,
|
|
||||||
defaultChunkSize,
|
|
||||||
secretKey,
|
|
||||||
accessKey,
|
|
||||||
containerKey,
|
|
||||||
tempURLMethods,
|
|
||||||
}
|
|
||||||
|
|
||||||
return New(parameters)
|
|
||||||
}
|
|
||||||
|
|
||||||
driverConstructor := func() (storagedriver.StorageDriver, error) {
|
|
||||||
return swiftDriverConstructor(prefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEmptyRootList(t *testing.T) {
|
|
||||||
validRoot := t.TempDir()
|
|
||||||
|
|
||||||
rootedDriver, err := swiftDriverConstructor(validRoot)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error creating rooted driver: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
emptyRootDriver, err := swiftDriverConstructor("")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error creating empty root driver: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
slashRootDriver, err := swiftDriverConstructor("/")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error creating slash root driver: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
filename := "/test"
|
|
||||||
contents := []byte("contents")
|
|
||||||
ctx := context.Background()
|
|
||||||
err = rootedDriver.PutContent(ctx, filename, contents)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error creating content: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
keys, _ := emptyRootDriver.List(ctx, "/")
|
|
||||||
for _, path := range keys {
|
|
||||||
if !storagedriver.PathRegexp.MatchString(path) {
|
|
||||||
t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
keys, _ = slashRootDriver.List(ctx, "/")
|
|
||||||
for _, path := range keys {
|
|
||||||
if !storagedriver.PathRegexp.MatchString(path) {
|
|
||||||
t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create an object with a path nested under the existing object
|
|
||||||
err = rootedDriver.PutContent(ctx, filename+"/file1", contents)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error creating content: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = rootedDriver.Delete(ctx, filename)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to delete: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
keys, err = rootedDriver.List(ctx, "/")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to list objects after deletion: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(keys) != 0 {
|
|
||||||
t.Fatal("delete did not remove nested objects")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilenameChunking(t *testing.T) {
|
|
||||||
// Test valid input and sizes
|
|
||||||
input := []string{"a", "b", "c", "d", "e"}
|
|
||||||
expecteds := [][][]string{
|
|
||||||
{
|
|
||||||
{"a"},
|
|
||||||
{"b"},
|
|
||||||
{"c"},
|
|
||||||
{"d"},
|
|
||||||
{"e"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
{"a", "b"},
|
|
||||||
{"c", "d"},
|
|
||||||
{"e"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
{"a", "b", "c"},
|
|
||||||
{"d", "e"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
{"a", "b", "c", "d"},
|
|
||||||
{"e"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
{"a", "b", "c", "d", "e"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
{"a", "b", "c", "d", "e"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for i, expected := range expecteds {
|
|
||||||
actual, err := chunkFilenames(input, i+1)
|
|
||||||
if !reflect.DeepEqual(actual, expected) {
|
|
||||||
t.Fatalf("chunk %v didn't match expected value %v", actual, expected)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error chunking filenames: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test nil input
|
|
||||||
actual, err := chunkFilenames(nil, 5)
|
|
||||||
if len(actual) != 0 {
|
|
||||||
t.Fatal("chunks were returned when passed nil")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error chunking filenames: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test 0 and < 0 sizes
|
|
||||||
_, err = chunkFilenames(nil, 0)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected error for size = 0")
|
|
||||||
}
|
|
||||||
_, err = chunkFilenames(nil, -1)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected error for size = -1")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSwiftSegmentPath(t *testing.T) {
|
|
||||||
d := &driver{
|
|
||||||
Prefix: "/test/segment/path",
|
|
||||||
}
|
|
||||||
|
|
||||||
s1, err := d.swiftSegmentPath("foo-baz")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error generating segment path: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
s2, err := d.swiftSegmentPath("foo-baz")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error generating segment path: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !strings.HasPrefix(s1, "test/segment/path/segments/") {
|
|
||||||
t.Fatalf("expected to be prefixed: %s", s1)
|
|
||||||
}
|
|
||||||
|
|
||||||
	if !strings.HasPrefix(s2, "test/segment/path/segments/") {
		t.Fatalf("expected to be prefixed: %s", s2)
	}

	if len(s1) != 68 {
		t.Fatalf("unexpected segment path length, %d != %d", len(s1), 68)
	}

	if len(s2) != 68 {
		t.Fatalf("unexpected segment path length, %d != %d", len(s2), 68)
	}

	if s1 == s2 {
		t.Fatalf("expected segment paths to differ, %s == %s", s1, s2)
	}
}
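The 68-character expectation in TestSwiftSegmentPath above is easier to follow once the path layout is spelled out. Below is a minimal sketch, assuming a SHA-1 based helper (the hypothetical `segmentPath` function and the object name are assumptions for illustration, not the removed driver's exact code), of how a path under `Prefix: "/test/segment/path"` comes to exactly 68 characters.

```go
package main

import (
	"crypto/rand"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"strings"
)

// segmentPath is a hypothetical helper: a random, hex-encoded SHA-1 digest
// nested under "<prefix>/segments/", split as 3 hex chars + "/" + 37 hex chars.
func segmentPath(prefix, object string) (string, error) {
	random := make([]byte, 32)
	if _, err := rand.Read(random); err != nil {
		return "", err
	}
	sum := sha1.Sum(append([]byte(object), random...)) // 20 bytes -> 40 hex chars
	h := hex.EncodeToString(sum[:])
	// "test/segment/path" (17) + "/segments/" (10) + 3 + "/" + 37 = 68 characters
	return strings.TrimLeft(prefix+"/segments/"+h[:3]+"/"+h[3:], "/"), nil
}

func main() {
	p, _ := segmentPath("/test/segment/path", "foo-baz")
	fmt.Println(len(p), p) // 68, and a different path on every call
}
```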
4 vendor/github.com/ncw/swift/.gitignore generated vendored
@@ -1,4 +0,0 @@
*~
*.pyc
test-env*
junk/
33 vendor/github.com/ncw/swift/.travis.yml generated vendored
@@ -1,33 +0,0 @@
language: go
sudo: false

go:
 - 1.2.x
 - 1.3.x
 - 1.4.x
 - 1.5.x
 - 1.6.x
 - 1.7.x
 - 1.8.x
 - 1.9.x
 - 1.10.x
 - 1.11.x
 - 1.12.x
 - master

matrix:
  include:
    - go: 1.12.x
      env: TEST_REAL_SERVER=rackspace
    - go: 1.12.x
      env: TEST_REAL_SERVER=memset
  allow_failures:
    - go: 1.12.x
      env: TEST_REAL_SERVER=rackspace
    - go: 1.12.x
      env: TEST_REAL_SERVER=memset
install: go test -i ./...
script:
  - test -z "$(go fmt ./...)"
  - go test
  - ./travis_realserver.sh
20 vendor/github.com/ncw/swift/COPYING generated vendored
@@ -1,20 +0,0 @@
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
156 vendor/github.com/ncw/swift/README.md generated vendored
@@ -1,156 +0,0 @@
Swift
=====

This package provides an easy to use library for interfacing with
Swift / Openstack Object Storage / Rackspace cloud files from the Go
Language

See here for package docs

http://godoc.org/github.com/ncw/swift

[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift)

Install
-------

Use go to install the library

    go get github.com/ncw/swift

Usage
-----

See here for full package docs

- http://godoc.org/github.com/ncw/swift

Here is a short example from the docs
```go
import "github.com/ncw/swift"

// Create a connection
c := swift.Connection{
    UserName: "user",
    ApiKey:   "key",
    AuthUrl:  "auth_url",
    Domain:   "domain", // Name of the domain (v3 auth only)
    Tenant:   "tenant", // Name of the tenant (v2 auth only)
}
// Authenticate
err := c.Authenticate()
if err != nil {
    panic(err)
}
// List all the containers
containers, err := c.ContainerNames(nil)
fmt.Println(containers)
// etc...
```

Additions
---------

The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface.

Testing
-------

To run the tests you can either use an embedded fake Swift server
either use a real Openstack Swift server or a Rackspace Cloud files account.

When using a real Swift server, you need to set these environment variables
before running the tests

    export SWIFT_API_USER='user'
    export SWIFT_API_KEY='key'
    export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0'

And optionally these if using v2 authentication

    export SWIFT_TENANT='TenantName'
    export SWIFT_TENANT_ID='TenantId'

And optionally these if using v3 authentication

    export SWIFT_TENANT='TenantName'
    export SWIFT_TENANT_ID='TenantId'
    export SWIFT_API_DOMAIN_ID='domain id'
    export SWIFT_API_DOMAIN='domain name'

And optionally these if using v3 trust

    export SWIFT_TRUST_ID='TrustId'

And optionally this if you want to skip server certificate validation

    export SWIFT_AUTH_INSECURE=1

And optionally this to configure the connect channel timeout, in seconds

    export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60

And optionally this to configure the data channel timeout, in seconds

    export SWIFT_DATA_CHANNEL_TIMEOUT=60

Then run the tests with `go test`

License
-------

This is free software under the terms of MIT license (check COPYING file
included in this package).

Contact and support
-------------------

The project website is at:

- https://github.com/ncw/swift

There you can file bug reports, ask for help or contribute patches.

Authors
-------

- Nick Craig-Wood <nick@craig-wood.com>

Contributors
------------

- Brian "bojo" Jones <mojobojo@gmail.com>
- Janika Liiv <janika@toggl.com>
- Yamamoto, Hirotaka <ymmt2005@gmail.com>
- Stephen <yo@groks.org>
- platformpurple <stephen@platformpurple.com>
- Paul Querna <pquerna@apache.org>
- Livio Soares <liviobs@gmail.com>
- thesyncim <thesyncim@gmail.com>
- lsowen <lsowen@s1network.com> <logan@s1network.com>
- Sylvain Baubeau <sbaubeau@redhat.com>
- Chris Kastorff <encryptio@gmail.com>
- Dai HaoJun <haojun.dai@hp.com>
- Hua Wang <wanghua.humble@gmail.com>
- Fabian Ruff <fabian@progra.de> <fabian.ruff@sap.com>
- Arturo Reuschenbach Puncernau <reuschenbach@gmail.com>
- Petr Kotek <petr.kotek@bigcommerce.com>
- Stefan Majewsky <stefan.majewsky@sap.com> <majewsky@gmx.net>
- Cezar Sa Espinola <cezarsa@gmail.com>
- Sam Gunaratne <samgzeit@gmail.com>
- Richard Scothern <richard.scothern@gmail.com>
- Michel Couillard <!--<couillard.michel@voxlog.ca>--> <michel.couillard@gmail.com>
- Christopher Waldon <ckwaldon@us.ibm.com>
- dennis <dai.haojun@gmail.com>
- hag <hannes.georg@xing.com>
- Alexander Neumann <alexander@bumpern.de>
- eclipseo <30413512+eclipseo@users.noreply.github.com>
- Yuri Per <yuri@acronis.com>
- Falk Reimann <falk.reimann@sap.com>
- Arthur Paim Arnold <arthurpaimarnold@gmail.com>
- Bruno Michel <bmichel@menfin.info>
- Charles Hsu <charles0126@gmail.com>
- Omar Ali <omarali@users.noreply.github.com>
- Andreas Andersen <andreas@softwaredesign.se>
- kayrus <kay.diam@gmail.com>
- CodeLingo Bot <bot@codelingo.io>
335 vendor/github.com/ncw/swift/auth.go generated vendored
@@ -1,335 +0,0 @@
|
||||||
package swift
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Auth defines the operations needed to authenticate with swift
|
|
||||||
//
|
|
||||||
// This encapsulates the different authentication schemes in use
|
|
||||||
type Authenticator interface {
|
|
||||||
// Request creates an http.Request for the auth - return nil if not needed
|
|
||||||
Request(*Connection) (*http.Request, error)
|
|
||||||
// Response parses the http.Response
|
|
||||||
Response(resp *http.Response) error
|
|
||||||
// The public storage URL - set Internal to true to read
|
|
||||||
// internal/service net URL
|
|
||||||
StorageUrl(Internal bool) string
|
|
||||||
// The access token
|
|
||||||
Token() string
|
|
||||||
// The CDN url if available
|
|
||||||
CdnUrl() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expireser is an optional interface to read the expiration time of the token
|
|
||||||
type Expireser interface {
|
|
||||||
Expires() time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type CustomEndpointAuthenticator interface {
|
|
||||||
StorageUrlForEndpoint(endpointType EndpointType) string
|
|
||||||
}
|
|
||||||
|
|
||||||
type EndpointType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Use public URL as storage URL
|
|
||||||
EndpointTypePublic = EndpointType("public")
|
|
||||||
|
|
||||||
// Use internal URL as storage URL
|
|
||||||
EndpointTypeInternal = EndpointType("internal")
|
|
||||||
|
|
||||||
// Use admin URL as storage URL
|
|
||||||
EndpointTypeAdmin = EndpointType("admin")
|
|
||||||
)
|
|
||||||
|
|
||||||
// newAuth - create a new Authenticator from the AuthUrl
|
|
||||||
//
|
|
||||||
// A hint for AuthVersion can be provided
|
|
||||||
func newAuth(c *Connection) (Authenticator, error) {
|
|
||||||
AuthVersion := c.AuthVersion
|
|
||||||
if AuthVersion == 0 {
|
|
||||||
if strings.Contains(c.AuthUrl, "v3") {
|
|
||||||
AuthVersion = 3
|
|
||||||
} else if strings.Contains(c.AuthUrl, "v2") {
|
|
||||||
AuthVersion = 2
|
|
||||||
} else if strings.Contains(c.AuthUrl, "v1") {
|
|
||||||
AuthVersion = 1
|
|
||||||
} else {
|
|
||||||
return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
switch AuthVersion {
|
|
||||||
case 1:
|
|
||||||
return &v1Auth{}, nil
|
|
||||||
case 2:
|
|
||||||
return &v2Auth{
|
|
||||||
// Guess as to whether using API key or
|
|
||||||
// password it will try both eventually so
|
|
||||||
// this is just an optimization.
|
|
||||||
useApiKey: len(c.ApiKey) >= 32,
|
|
||||||
}, nil
|
|
||||||
case 3:
|
|
||||||
return &v3Auth{}, nil
|
|
||||||
}
|
|
||||||
return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
|
||||||
|
|
||||||
// v1 auth
|
|
||||||
type v1Auth struct {
|
|
||||||
Headers http.Header // V1 auth: the authentication headers so extensions can access them
|
|
||||||
}
|
|
||||||
|
|
||||||
// v1 Authentication - make request
|
|
||||||
func (auth *v1Auth) Request(c *Connection) (*http.Request, error) {
|
|
||||||
req, err := http.NewRequest("GET", c.AuthUrl, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
req.Header.Set("User-Agent", c.UserAgent)
|
|
||||||
req.Header.Set("X-Auth-Key", c.ApiKey)
|
|
||||||
req.Header.Set("X-Auth-User", c.UserName)
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// v1 Authentication - read response
|
|
||||||
func (auth *v1Auth) Response(resp *http.Response) error {
|
|
||||||
auth.Headers = resp.Header
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// v1 Authentication - read storage url
|
|
||||||
func (auth *v1Auth) StorageUrl(Internal bool) string {
|
|
||||||
storageUrl := auth.Headers.Get("X-Storage-Url")
|
|
||||||
if Internal {
|
|
||||||
newUrl, err := url.Parse(storageUrl)
|
|
||||||
if err != nil {
|
|
||||||
return storageUrl
|
|
||||||
}
|
|
||||||
newUrl.Host = "snet-" + newUrl.Host
|
|
||||||
storageUrl = newUrl.String()
|
|
||||||
}
|
|
||||||
return storageUrl
|
|
||||||
}
|
|
||||||
|
|
||||||
// v1 Authentication - read auth token
|
|
||||||
func (auth *v1Auth) Token() string {
|
|
||||||
return auth.Headers.Get("X-Auth-Token")
|
|
||||||
}
|
|
||||||
|
|
||||||
// v1 Authentication - read cdn url
|
|
||||||
func (auth *v1Auth) CdnUrl() string {
|
|
||||||
return auth.Headers.Get("X-CDN-Management-Url")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
|
||||||
|
|
||||||
// v2 Authentication
|
|
||||||
type v2Auth struct {
|
|
||||||
Auth *v2AuthResponse
|
|
||||||
Region string
|
|
||||||
useApiKey bool // if set will use API key not Password
|
|
||||||
useApiKeyOk bool // if set won't change useApiKey any more
|
|
||||||
notFirst bool // set after first run
|
|
||||||
}
|
|
||||||
|
|
||||||
// v2 Authentication - make request
|
|
||||||
func (auth *v2Auth) Request(c *Connection) (*http.Request, error) {
|
|
||||||
auth.Region = c.Region
|
|
||||||
// Toggle useApiKey if not first run and not OK yet
|
|
||||||
if auth.notFirst && !auth.useApiKeyOk {
|
|
||||||
auth.useApiKey = !auth.useApiKey
|
|
||||||
}
|
|
||||||
auth.notFirst = true
|
|
||||||
// Create a V2 auth request for the body of the connection
|
|
||||||
var v2i interface{}
|
|
||||||
if !auth.useApiKey {
|
|
||||||
// Normal swift authentication
|
|
||||||
v2 := v2AuthRequest{}
|
|
||||||
v2.Auth.PasswordCredentials.UserName = c.UserName
|
|
||||||
v2.Auth.PasswordCredentials.Password = c.ApiKey
|
|
||||||
v2.Auth.Tenant = c.Tenant
|
|
||||||
v2.Auth.TenantId = c.TenantId
|
|
||||||
v2i = v2
|
|
||||||
} else {
|
|
||||||
// Rackspace special with API Key
|
|
||||||
v2 := v2AuthRequestRackspace{}
|
|
||||||
v2.Auth.ApiKeyCredentials.UserName = c.UserName
|
|
||||||
v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey
|
|
||||||
v2.Auth.Tenant = c.Tenant
|
|
||||||
v2.Auth.TenantId = c.TenantId
|
|
||||||
v2i = v2
|
|
||||||
}
|
|
||||||
body, err := json.Marshal(v2i)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
url := c.AuthUrl
|
|
||||||
if !strings.HasSuffix(url, "/") {
|
|
||||||
url += "/"
|
|
||||||
}
|
|
||||||
url += "tokens"
|
|
||||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
req.Header.Set("User-Agent", c.UserAgent)
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// v2 Authentication - read response
|
|
||||||
func (auth *v2Auth) Response(resp *http.Response) error {
|
|
||||||
auth.Auth = new(v2AuthResponse)
|
|
||||||
err := readJson(resp, auth.Auth)
|
|
||||||
// If successfully read Auth then no need to toggle useApiKey any more
|
|
||||||
if err == nil {
|
|
||||||
auth.useApiKeyOk = true
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Finds the Endpoint Url of "type" from the v2AuthResponse using the
|
|
||||||
// Region if set or defaulting to the first one if not
|
|
||||||
//
|
|
||||||
// Returns "" if not found
|
|
||||||
func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string {
|
|
||||||
for _, catalog := range auth.Auth.Access.ServiceCatalog {
|
|
||||||
if catalog.Type == Type {
|
|
||||||
for _, endpoint := range catalog.Endpoints {
|
|
||||||
if auth.Region == "" || (auth.Region == endpoint.Region) {
|
|
||||||
switch endpointType {
|
|
||||||
case EndpointTypeInternal:
|
|
||||||
return endpoint.InternalUrl
|
|
||||||
case EndpointTypePublic:
|
|
||||||
return endpoint.PublicUrl
|
|
||||||
case EndpointTypeAdmin:
|
|
||||||
return endpoint.AdminUrl
|
|
||||||
default:
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// v2 Authentication - read storage url
|
|
||||||
//
|
|
||||||
// If Internal is true then it reads the private (internal / service
|
|
||||||
// net) URL.
|
|
||||||
func (auth *v2Auth) StorageUrl(Internal bool) string {
|
|
||||||
endpointType := EndpointTypePublic
|
|
||||||
if Internal {
|
|
||||||
endpointType = EndpointTypeInternal
|
|
||||||
}
|
|
||||||
return auth.StorageUrlForEndpoint(endpointType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// v2 Authentication - read storage url
|
|
||||||
//
|
|
||||||
// Use the indicated endpointType to choose a URL.
|
|
||||||
func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
|
|
||||||
return auth.endpointUrl("object-store", endpointType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// v2 Authentication - read auth token
|
|
||||||
func (auth *v2Auth) Token() string {
|
|
||||||
return auth.Auth.Access.Token.Id
|
|
||||||
}
|
|
||||||
|
|
||||||
// v2 Authentication - read expires
|
|
||||||
func (auth *v2Auth) Expires() time.Time {
|
|
||||||
t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires)
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{} // return Zero if not parsed
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// v2 Authentication - read cdn url
|
|
||||||
func (auth *v2Auth) CdnUrl() string {
|
|
||||||
return auth.endpointUrl("rax:object-cdn", EndpointTypePublic)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
|
||||||
|
|
||||||
// V2 Authentication request
|
|
||||||
//
|
|
||||||
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
|
|
||||||
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
|
|
||||||
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
|
|
||||||
type v2AuthRequest struct {
|
|
||||||
Auth struct {
|
|
||||||
PasswordCredentials struct {
|
|
||||||
UserName string `json:"username"`
|
|
||||||
Password string `json:"password"`
|
|
||||||
} `json:"passwordCredentials"`
|
|
||||||
Tenant string `json:"tenantName,omitempty"`
|
|
||||||
TenantId string `json:"tenantId,omitempty"`
|
|
||||||
} `json:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// V2 Authentication request - Rackspace variant
|
|
||||||
//
|
|
||||||
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
|
|
||||||
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
|
|
||||||
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
|
|
||||||
type v2AuthRequestRackspace struct {
|
|
||||||
Auth struct {
|
|
||||||
ApiKeyCredentials struct {
|
|
||||||
UserName string `json:"username"`
|
|
||||||
ApiKey string `json:"apiKey"`
|
|
||||||
} `json:"RAX-KSKEY:apiKeyCredentials"`
|
|
||||||
Tenant string `json:"tenantName,omitempty"`
|
|
||||||
TenantId string `json:"tenantId,omitempty"`
|
|
||||||
} `json:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// V2 Authentication reply
|
|
||||||
//
|
|
||||||
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
|
|
||||||
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
|
|
||||||
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
|
|
||||||
type v2AuthResponse struct {
|
|
||||||
Access struct {
|
|
||||||
ServiceCatalog []struct {
|
|
||||||
Endpoints []struct {
|
|
||||||
InternalUrl string
|
|
||||||
PublicUrl string
|
|
||||||
AdminUrl string
|
|
||||||
Region string
|
|
||||||
TenantId string
|
|
||||||
}
|
|
||||||
Name string
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
Token struct {
|
|
||||||
Expires string
|
|
||||||
Id string
|
|
||||||
Tenant struct {
|
|
||||||
Id string
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
}
|
|
||||||
User struct {
|
|
||||||
DefaultRegion string `json:"RAX-AUTH:defaultRegion"`
|
|
||||||
Id string
|
|
||||||
Name string
|
|
||||||
Roles []struct {
|
|
||||||
Description string
|
|
||||||
Id string
|
|
||||||
Name string
|
|
||||||
TenantId string
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
300 vendor/github.com/ncw/swift/auth_v3.go generated vendored
@@ -1,300 +0,0 @@
|
||||||
package swift
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
v3AuthMethodToken = "token"
|
|
||||||
v3AuthMethodPassword = "password"
|
|
||||||
v3AuthMethodApplicationCredential = "application_credential"
|
|
||||||
v3CatalogTypeObjectStore = "object-store"
|
|
||||||
)
|
|
||||||
|
|
||||||
// V3 Authentication request
|
|
||||||
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
|
|
||||||
// http://developer.openstack.org/api-ref-identity-v3.html
|
|
||||||
type v3AuthRequest struct {
|
|
||||||
Auth struct {
|
|
||||||
Identity struct {
|
|
||||||
Methods []string `json:"methods"`
|
|
||||||
Password *v3AuthPassword `json:"password,omitempty"`
|
|
||||||
Token *v3AuthToken `json:"token,omitempty"`
|
|
||||||
ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"`
|
|
||||||
} `json:"identity"`
|
|
||||||
Scope *v3Scope `json:"scope,omitempty"`
|
|
||||||
} `json:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3Scope struct {
|
|
||||||
Project *v3Project `json:"project,omitempty"`
|
|
||||||
Domain *v3Domain `json:"domain,omitempty"`
|
|
||||||
Trust *v3Trust `json:"OS-TRUST:trust,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3Domain struct {
|
|
||||||
Id string `json:"id,omitempty"`
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3Project struct {
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
Id string `json:"id,omitempty"`
|
|
||||||
Domain *v3Domain `json:"domain,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3Trust struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3User struct {
|
|
||||||
Domain *v3Domain `json:"domain,omitempty"`
|
|
||||||
Id string `json:"id,omitempty"`
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
Password string `json:"password,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3AuthToken struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3AuthPassword struct {
|
|
||||||
User v3User `json:"user"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3AuthApplicationCredential struct {
|
|
||||||
Id string `json:"id,omitempty"`
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
Secret string `json:"secret,omitempty"`
|
|
||||||
User *v3User `json:"user,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// V3 Authentication response
|
|
||||||
type v3AuthResponse struct {
|
|
||||||
Token struct {
|
|
||||||
ExpiresAt string `json:"expires_at"`
|
|
||||||
IssuedAt string `json:"issued_at"`
|
|
||||||
Methods []string
|
|
||||||
Roles []struct {
|
|
||||||
Id, Name string
|
|
||||||
Links struct {
|
|
||||||
Self string
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Project struct {
|
|
||||||
Domain struct {
|
|
||||||
Id, Name string
|
|
||||||
}
|
|
||||||
Id, Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
Catalog []struct {
|
|
||||||
Id, Namem, Type string
|
|
||||||
Endpoints []struct {
|
|
||||||
Id, Region_Id, Url, Region string
|
|
||||||
Interface EndpointType
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
User struct {
|
|
||||||
Id, Name string
|
|
||||||
Domain struct {
|
|
||||||
Id, Name string
|
|
||||||
Links struct {
|
|
||||||
Self string
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Audit_Ids []string
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type v3Auth struct {
|
|
||||||
Region string
|
|
||||||
Auth *v3AuthResponse
|
|
||||||
Headers http.Header
|
|
||||||
}
|
|
||||||
|
|
||||||
func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
|
|
||||||
auth.Region = c.Region
|
|
||||||
|
|
||||||
var v3i interface{}
|
|
||||||
|
|
||||||
v3 := v3AuthRequest{}
|
|
||||||
|
|
||||||
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" {
|
|
||||||
var user *v3User
|
|
||||||
|
|
||||||
if c.ApplicationCredentialId != "" {
|
|
||||||
c.ApplicationCredentialName = ""
|
|
||||||
user = &v3User{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if user == nil && c.UserId != "" {
|
|
||||||
// UserID could be used without the domain information
|
|
||||||
user = &v3User{
|
|
||||||
Id: c.UserId,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if user == nil && c.UserName == "" {
|
|
||||||
// Make sure that Username or UserID are provided
|
|
||||||
return nil, fmt.Errorf("UserID or Name should be provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
if user == nil && c.DomainId != "" {
|
|
||||||
user = &v3User{
|
|
||||||
Name: c.UserName,
|
|
||||||
Domain: &v3Domain{
|
|
||||||
Id: c.DomainId,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if user == nil && c.Domain != "" {
|
|
||||||
user = &v3User{
|
|
||||||
Name: c.UserName,
|
|
||||||
Domain: &v3Domain{
|
|
||||||
Name: c.Domain,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure that DomainID or DomainName are provided among Username
|
|
||||||
if user == nil {
|
|
||||||
return nil, fmt.Errorf("DomainID or Domain should be provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential}
|
|
||||||
v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{
|
|
||||||
Id: c.ApplicationCredentialId,
|
|
||||||
Name: c.ApplicationCredentialName,
|
|
||||||
Secret: c.ApplicationCredentialSecret,
|
|
||||||
User: user,
|
|
||||||
}
|
|
||||||
} else if c.UserName == "" && c.UserId == "" {
|
|
||||||
v3.Auth.Identity.Methods = []string{v3AuthMethodToken}
|
|
||||||
v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey}
|
|
||||||
} else {
|
|
||||||
v3.Auth.Identity.Methods = []string{v3AuthMethodPassword}
|
|
||||||
v3.Auth.Identity.Password = &v3AuthPassword{
|
|
||||||
User: v3User{
|
|
||||||
Name: c.UserName,
|
|
||||||
Id: c.UserId,
|
|
||||||
Password: c.ApiKey,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var domain *v3Domain
|
|
||||||
|
|
||||||
if c.Domain != "" {
|
|
||||||
domain = &v3Domain{Name: c.Domain}
|
|
||||||
} else if c.DomainId != "" {
|
|
||||||
domain = &v3Domain{Id: c.DomainId}
|
|
||||||
}
|
|
||||||
v3.Auth.Identity.Password.User.Domain = domain
|
|
||||||
}
|
|
||||||
|
|
||||||
if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential {
|
|
||||||
if c.TrustId != "" {
|
|
||||||
v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
|
|
||||||
} else if c.TenantId != "" || c.Tenant != "" {
|
|
||||||
|
|
||||||
v3.Auth.Scope = &v3Scope{Project: &v3Project{}}
|
|
||||||
|
|
||||||
if c.TenantId != "" {
|
|
||||||
v3.Auth.Scope.Project.Id = c.TenantId
|
|
||||||
} else if c.Tenant != "" {
|
|
||||||
v3.Auth.Scope.Project.Name = c.Tenant
|
|
||||||
switch {
|
|
||||||
case c.TenantDomain != "":
|
|
||||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain}
|
|
||||||
case c.TenantDomainId != "":
|
|
||||||
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId}
|
|
||||||
case c.Domain != "":
|
|
||||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain}
|
|
||||||
case c.DomainId != "":
|
|
||||||
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId}
|
|
||||||
default:
|
|
||||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
v3i = v3
|
|
||||||
|
|
||||||
body, err := json.Marshal(v3i)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
url := c.AuthUrl
|
|
||||||
if !strings.HasSuffix(url, "/") {
|
|
||||||
url += "/"
|
|
||||||
}
|
|
||||||
url += "auth/tokens"
|
|
||||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
req.Header.Set("User-Agent", c.UserAgent)
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (auth *v3Auth) Response(resp *http.Response) error {
|
|
||||||
auth.Auth = &v3AuthResponse{}
|
|
||||||
auth.Headers = resp.Header
|
|
||||||
err := readJson(resp, auth.Auth)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string {
|
|
||||||
for _, catalog := range auth.Auth.Token.Catalog {
|
|
||||||
if catalog.Type == Type {
|
|
||||||
for _, endpoint := range catalog.Endpoints {
|
|
||||||
if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) {
|
|
||||||
return endpoint.Url
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (auth *v3Auth) StorageUrl(Internal bool) string {
|
|
||||||
endpointType := EndpointTypePublic
|
|
||||||
if Internal {
|
|
||||||
endpointType = EndpointTypeInternal
|
|
||||||
}
|
|
||||||
return auth.StorageUrlForEndpoint(endpointType)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
|
|
||||||
return auth.endpointUrl("object-store", endpointType)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (auth *v3Auth) Token() string {
|
|
||||||
return auth.Headers.Get("X-Subject-Token")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (auth *v3Auth) Expires() time.Time {
|
|
||||||
t, err := time.Parse(time.RFC3339, auth.Auth.Token.ExpiresAt)
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{} // return Zero if not parsed
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (auth *v3Auth) CdnUrl() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
28 vendor/github.com/ncw/swift/compatibility_1_0.go generated vendored
@@ -1,28 +0,0 @@
// Go 1.0 compatibility functions

// +build !go1.1

package swift

import (
	"log"
	"net/http"
	"time"
)

// Cancel the request - doesn't work under < go 1.1
func cancelRequest(transport http.RoundTripper, req *http.Request) {
	log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1")
}

// Reset a timer - Doesn't work properly < go 1.1
//
// This is quite hard to do properly under go < 1.1 so we do a crude
// approximation and hope that everyone upgrades to go 1.1 quickly
func resetTimer(t *time.Timer, d time.Duration) {
	t.Stop()
	// Very likely this doesn't actually work if we are already
	// selecting on t.C. However we've stopped the original timer
	// so won't break transfers but may not time them out :-(
	*t = *time.NewTimer(d)
}
24 vendor/github.com/ncw/swift/compatibility_1_1.go generated vendored
@@ -1,24 +0,0 @@
// Go 1.1 and later compatibility functions
//
// +build go1.1

package swift

import (
	"net/http"
	"time"
)

// Cancel the request
func cancelRequest(transport http.RoundTripper, req *http.Request) {
	if tr, ok := transport.(interface {
		CancelRequest(*http.Request)
	}); ok {
		tr.CancelRequest(req)
	}
}

// Reset a timer
func resetTimer(t *time.Timer, d time.Duration) {
	t.Reset(d)
}
23 vendor/github.com/ncw/swift/compatibility_1_6.go generated vendored
@@ -1,23 +0,0 @@
// +build go1.6

package swift

import (
	"net/http"
	"time"
)

const IS_AT_LEAST_GO_16 = true

func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {
	tr.ExpectContinueTimeout = t
}

func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {
	if req.Body != nil {
		req.Header.Add("Expect", "100-continue")
	}
	if !hasContentLength {
		req.TransferEncoding = []string{"chunked"}
	}
}
13 vendor/github.com/ncw/swift/compatibility_not_1_6.go generated vendored
@@ -1,13 +0,0 @@
// +build !go1.6

package swift

import (
	"net/http"
	"time"
)

const IS_AT_LEAST_GO_16 = false

func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {}
func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {}
136 vendor/github.com/ncw/swift/dlo.go generated vendored
@@ -1,136 +0,0 @@
package swift

import (
	"os"
)

// DynamicLargeObjectCreateFile represents an open static large object
type DynamicLargeObjectCreateFile struct {
	largeObjectCreateFile
}

// DynamicLargeObjectCreateFile creates a dynamic large object
// returning an object which satisfies io.Writer, io.Seeker, io.Closer
// and io.ReaderFrom. The flags are as passes to the
// largeObjectCreate method.
func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
	lo, err := c.largeObjectCreate(opts)
	if err != nil {
		return nil, err
	}

	return withBuffer(opts, &DynamicLargeObjectCreateFile{
		largeObjectCreateFile: *lo,
	}), nil
}

// DynamicLargeObjectCreate creates or truncates an existing dynamic
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling DynamicLargeObjectCreateFile
func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
	opts.Flags = os.O_TRUNC | os.O_CREATE
	return c.DynamicLargeObjectCreateFile(opts)
}

// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments.
func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
	return c.LargeObjectDelete(container, path)
}

// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName
func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
	info, headers, err := c.Object(dstContainer, srcObjectName)
	if err != nil {
		return err
	}

	segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
	if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType); err != nil {
		return err
	}

	if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
		return err
	}

	return nil
}

// createDLOManifest creates a dynamic large object manifest
func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string) error {
	headers := make(Headers)
	headers["X-Object-Manifest"] = prefix
	manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
	if err != nil {
		return err
	}

	if err := manifest.Close(); err != nil {
		return err
	}

	return nil
}

// Close satisfies the io.Closer interface
func (file *DynamicLargeObjectCreateFile) Close() error {
	return file.Flush()
}

func (file *DynamicLargeObjectCreateFile) Flush() error {
	err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType)
	if err != nil {
		return err
	}
	return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
}

func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) {
	//a simple container listing works 99.9% of the time
	segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath})
	if err != nil {
		return nil, err
	}

	hasObjectName := make(map[string]struct{})
	for _, segment := range segments {
		hasObjectName[segment.Name] = struct{}{}
	}

	//The container listing might be outdated (i.e. not contain all existing
	//segment objects yet) because of temporary inconsistency (Swift is only
	//eventually consistent!). Check its completeness.
	segmentNumber := 0
	for {
		segmentNumber++
		segmentName := getSegment(segmentPath, segmentNumber)
		if _, seen := hasObjectName[segmentName]; seen {
			continue
		}

		//This segment is missing in the container listing. Use a more reliable
		//request to check its existence. (HEAD requests on segments are
		//guaranteed to return the correct metadata, except for the pathological
		//case of an outage of large parts of the Swift cluster or its network,
		//since every segment is only written once.)
		segment, _, err := c.Object(segmentContainer, segmentName)
		switch err {
		case nil:
			//found new segment -> add it in the correct position and keep
			//going, more might be missing
			if segmentNumber <= len(segments) {
				segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...)
				segments[segmentNumber-1] = segment
			} else {
				segments = append(segments, segment)
			}
			continue
		case ObjectNotFound:
			//This segment is missing. Since we upload segments sequentially,
			//there won't be any more segments after it.
			return segments, nil
		default:
			return nil, err //unexpected error
		}
	}
}
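For orientation, the DLO API removed in the file above was typically driven as in the following sketch. It relies only on signatures visible in this diff (DynamicLargeObjectCreate, the LargeObjectOpts fields, and the LargeObjectFile Write/Close methods); the connection values and container names are placeholders, not part of the changeset.

```go
package main

import (
	"fmt"

	"github.com/ncw/swift"
)

func main() {
	// Placeholder credentials; see the README example earlier in this diff.
	c := swift.Connection{UserName: "user", ApiKey: "key", AuthUrl: "auth_url"}
	if err := c.Authenticate(); err != nil {
		panic(err)
	}

	// Create a dynamic large object: segments default to "<container>_segments"
	// and the manifest object carries an X-Object-Manifest header.
	out, err := c.DynamicLargeObjectCreate(&swift.LargeObjectOpts{
		Container:   "media",
		ObjectName:  "video.bin",
		ContentType: "application/octet-stream",
		ChunkSize:   10 * 1024 * 1024, // matches the library default
	})
	if err != nil {
		panic(err)
	}
	if _, err := out.Write([]byte("payload...")); err != nil {
		panic(err)
	}
	if err := out.Close(); err != nil { // Close flushes the manifest
		panic(err)
	}
	fmt.Println("uploaded dynamic large object")
}
```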
19 vendor/github.com/ncw/swift/doc.go generated vendored
@@ -1,19 +0,0 @@
/*
Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files

Standard Usage

Most of the work is done through the Container*() and Object*() methods.

All methods are safe to use concurrently in multiple go routines.

Object Versioning

As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in it's place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system.

Rackspace Sub Module

This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects.

*/
package swift
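The Object Versioning paragraph in doc.go describes the two-container setup only in prose. The minimal sketch below shows that setup using the standard Swift X-Versions-Location container header; it assumes the library's ContainerCreate(container, headers) helper and uses placeholder credentials and container names.

```go
package main

import "github.com/ncw/swift"

func main() {
	// Placeholder credentials.
	c := swift.Connection{UserName: "user", ApiKey: "key", AuthUrl: "auth_url"}
	if err := c.Authenticate(); err != nil {
		panic(err)
	}

	// Version container that receives superseded copies of objects.
	if err := c.ContainerCreate("backups", nil); err != nil {
		panic(err)
	}
	// Current container: the standard X-Versions-Location header tells Swift to
	// move the previous copy into "backups" whenever an object is overwritten.
	headers := swift.Headers{"X-Versions-Location": "backups"}
	if err := c.ContainerCreate("current", headers); err != nil {
		panic(err)
	}
}
```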
448 vendor/github.com/ncw/swift/largeobjects.go generated vendored
@@ -1,448 +0,0 @@
|
||||||
package swift
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha1"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
gopath "path"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NotLargeObject is returned if an operation is performed on an object which isn't large.
|
|
||||||
var NotLargeObject = errors.New("Not a large object")
|
|
||||||
|
|
||||||
// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded
|
|
||||||
var readAfterWriteTimeout = 15 * time.Second
|
|
||||||
|
|
||||||
// readAfterWriteWait defines the time to sleep between two retries
|
|
||||||
var readAfterWriteWait = 200 * time.Millisecond
|
|
||||||
|
|
||||||
// largeObjectCreateFile represents an open static or dynamic large object
|
|
||||||
type largeObjectCreateFile struct {
|
|
||||||
conn *Connection
|
|
||||||
container string
|
|
||||||
objectName string
|
|
||||||
currentLength int64
|
|
||||||
filePos int64
|
|
||||||
chunkSize int64
|
|
||||||
segmentContainer string
|
|
||||||
prefix string
|
|
||||||
contentType string
|
|
||||||
checkHash bool
|
|
||||||
segments []Object
|
|
||||||
headers Headers
|
|
||||||
minChunkSize int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func swiftSegmentPath(path string) (string, error) {
|
|
||||||
checksum := sha1.New()
|
|
||||||
random := make([]byte, 32)
|
|
||||||
if _, err := rand.Read(random); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...)))
|
|
||||||
return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getSegment(segmentPath string, partNumber int) string {
|
|
||||||
return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseFullPath(manifest string) (container string, prefix string) {
|
|
||||||
components := strings.SplitN(manifest, "/", 2)
|
|
||||||
container = components[0]
|
|
||||||
if len(components) > 1 {
|
|
||||||
prefix = components[1]
|
|
||||||
}
|
|
||||||
return container, prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
func (headers Headers) IsLargeObjectDLO() bool {
|
|
||||||
_, isDLO := headers["X-Object-Manifest"]
|
|
||||||
return isDLO
|
|
||||||
}
|
|
||||||
|
|
||||||
func (headers Headers) IsLargeObjectSLO() bool {
|
|
||||||
_, isSLO := headers["X-Static-Large-Object"]
|
|
||||||
return isSLO
|
|
||||||
}
|
|
||||||
|
|
||||||
func (headers Headers) IsLargeObject() bool {
|
|
||||||
return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) {
|
|
||||||
if manifest, isDLO := headers["X-Object-Manifest"]; isDLO {
|
|
||||||
segmentContainer, segmentPath := parseFullPath(manifest)
|
|
||||||
segments, err := c.getAllDLOSegments(segmentContainer, segmentPath)
|
|
||||||
return segmentContainer, segments, err
|
|
||||||
}
|
|
||||||
if headers.IsLargeObjectSLO() {
|
|
||||||
return c.getAllSLOSegments(container, path)
|
|
||||||
}
|
|
||||||
return "", nil, NotLargeObject
|
|
||||||
}
|
|
||||||
|
|
||||||
// LargeObjectOpts describes how a large object should be created
|
|
||||||
type LargeObjectOpts struct {
|
|
||||||
Container string // Name of container to place object
|
|
||||||
ObjectName string // Name of object
|
|
||||||
Flags int // Creation flags
|
|
||||||
CheckHash bool // If set Check the hash
|
|
||||||
Hash string // If set use this hash to check
|
|
||||||
ContentType string // Content-Type of the object
|
|
||||||
Headers Headers // Additional headers to upload the object with
|
|
||||||
ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set
|
|
||||||
MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info
|
|
||||||
SegmentContainer string // Name of the container to place segments
|
|
||||||
SegmentPrefix string // Prefix to use for the segments
|
|
||||||
NoBuffer bool // Prevents using a bufio.Writer to write segments
|
|
||||||
}
|
|
||||||
|
|
||||||
type LargeObjectFile interface {
|
|
||||||
io.Writer
|
|
||||||
io.Seeker
|
|
||||||
io.Closer
|
|
||||||
Size() int64
|
|
||||||
Flush() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// largeObjectCreate creates a large object at opts.Container, opts.ObjectName.
|
|
||||||
//
|
|
||||||
// opts.Flags can have the following bits set
|
|
||||||
// os.TRUNC - remove the contents of the large object if it exists
|
|
||||||
// os.APPEND - write at the end of the large object
|
|
||||||
func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
|
|
||||||
var (
|
|
||||||
segmentPath string
|
|
||||||
segmentContainer string
|
|
||||||
segments []Object
|
|
||||||
currentLength int64
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
if opts.SegmentPrefix != "" {
|
|
||||||
segmentPath = opts.SegmentPrefix
|
|
||||||
} else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil {
|
|
||||||
if opts.Flags&os.O_TRUNC != 0 {
|
|
||||||
c.LargeObjectDelete(opts.Container, opts.ObjectName)
|
|
||||||
} else {
|
|
||||||
currentLength = info.Bytes
|
|
||||||
if headers.IsLargeObject() {
|
|
||||||
segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if len(segments) > 0 {
|
|
||||||
segmentPath = gopath.Dir(segments[0].Name)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
segments = append(segments, info)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if err != ObjectNotFound {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// segmentContainer is not empty when the manifest already existed
|
|
||||||
if segmentContainer == "" {
|
|
||||||
if opts.SegmentContainer != "" {
|
|
||||||
segmentContainer = opts.SegmentContainer
|
|
||||||
} else {
|
|
||||||
segmentContainer = opts.Container + "_segments"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
file := &largeObjectCreateFile{
|
|
||||||
conn: c,
|
|
||||||
checkHash: opts.CheckHash,
|
|
||||||
container: opts.Container,
|
|
||||||
objectName: opts.ObjectName,
|
|
||||||
chunkSize: opts.ChunkSize,
|
|
||||||
minChunkSize: opts.MinChunkSize,
|
|
||||||
headers: opts.Headers,
|
|
||||||
segmentContainer: segmentContainer,
|
|
||||||
prefix: segmentPath,
|
|
||||||
segments: segments,
|
|
||||||
currentLength: currentLength,
|
|
||||||
}
|
|
||||||
|
|
||||||
if file.chunkSize == 0 {
|
|
||||||
file.chunkSize = 10 * 1024 * 1024
|
|
||||||
}
|
|
||||||
|
|
||||||
if file.minChunkSize > file.chunkSize {
|
|
||||||
file.chunkSize = file.minChunkSize
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.Flags&os.O_APPEND != 0 {
|
|
||||||
file.filePos = currentLength
|
|
||||||
}
|
|
||||||
|
|
||||||
return file, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LargeObjectDelete deletes the large object named by container, path
|
|
||||||
func (c *Connection) LargeObjectDelete(container string, objectName string) error {
|
|
||||||
_, headers, err := c.Object(container, objectName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var objects [][]string
|
|
||||||
if headers.IsLargeObject() {
|
|
||||||
segmentContainer, segments, err := c.getAllSegments(container, objectName, headers)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, obj := range segments {
|
|
||||||
objects = append(objects, []string{segmentContainer, obj.Name})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
objects = append(objects, []string{container, objectName})
|
|
||||||
|
|
||||||
info, err := c.cachedQueryInfo()
|
|
||||||
if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
|
|
||||||
filenames := make([]string, len(objects))
|
|
||||||
for i, obj := range objects {
|
|
||||||
filenames[i] = obj[0] + "/" + obj[1]
|
|
||||||
}
|
|
||||||
_, err = c.doBulkDelete(filenames)
|
|
||||||
// Don't fail on ObjectNotFound because eventual consistency
|
|
||||||
// makes this situation normal.
|
|
||||||
if err != nil && err != Forbidden && err != ObjectNotFound {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for _, obj := range objects {
|
|
||||||
if err := c.ObjectDelete(obj[0], obj[1]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LargeObjectGetSegments returns all the segments that compose an object
|
|
||||||
// If the object is a Dynamic Large Object (DLO), it just returns the objects
|
|
||||||
// that have the prefix as indicated by the manifest.
|
|
||||||
// If the object is a Static Large Object (SLO), it retrieves the JSON content
|
|
||||||
// of the manifest and return all the segments of it.
|
|
||||||
func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) {
|
|
||||||
_, headers, err := c.Object(container, path)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return c.getAllSegments(container, path, headers)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seek sets the offset for the next write operation
|
|
||||||
func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
switch whence {
|
|
||||||
case 0:
|
|
||||||
file.filePos = offset
|
|
||||||
case 1:
|
|
||||||
file.filePos += offset
|
|
||||||
case 2:
|
|
||||||
file.filePos = file.currentLength + offset
|
|
||||||
default:
|
|
||||||
return -1, fmt.Errorf("invalid value for whence")
|
|
||||||
}
|
|
||||||
if file.filePos < 0 {
|
|
||||||
return -1, fmt.Errorf("negative offset")
|
|
||||||
}
|
|
||||||
return file.filePos, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (file *largeObjectCreateFile) Size() int64 {
|
|
||||||
return file.currentLength
|
|
||||||
}
|
|
||||||
|
|
||||||
func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) {
|
|
||||||
endTimer := time.NewTimer(readAfterWriteTimeout)
|
|
||||||
defer endTimer.Stop()
|
|
||||||
waitingTime := readAfterWriteWait
|
|
||||||
for {
|
|
||||||
var headers Headers
|
|
||||||
var sz int64
|
|
||||||
		if headers, sz, err = fn(); err == nil {
			if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz {
				return
			}
		} else {
			return
		}
		waitTimer := time.NewTimer(waitingTime)
		select {
		case <-endTimer.C:
			waitTimer.Stop()
			err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz)
			return
		case <-waitTimer.C:
			waitingTime *= 2
		}
	}
}

func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) {
	err = withLORetry(expectedSize, func() (Headers, int64, error) {
		var info Object
		var headers Headers
		info, headers, err = c.objectBase(container, objectName)
		if err != nil {
			return headers, 0, err
		}
		return headers, info.Bytes, nil
	})
	return
}

// Write satisfies the io.Writer interface
func (file *largeObjectCreateFile) Write(buf []byte) (int, error) {
	var sz int64
	var relativeFilePos int
	writeSegmentIdx := 0
	for i, obj := range file.segments {
		if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) {
			relativeFilePos = int(file.filePos - sz)
			break
		}
		writeSegmentIdx++
		sz += obj.Bytes
	}
	sizeToWrite := len(buf)
	for offset := 0; offset < sizeToWrite; {
		newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos)
		if err != nil {
			return 0, err
		}
		if writeSegmentIdx < len(file.segments) {
			file.segments[writeSegmentIdx] = *newSegment
		} else {
			file.segments = append(file.segments, *newSegment)
		}
		offset += n
		writeSegmentIdx++
		relativeFilePos = 0
	}
	file.filePos += int64(sizeToWrite)
	file.currentLength = 0
	for _, obj := range file.segments {
		file.currentLength += obj.Bytes
	}
	return sizeToWrite, nil
}

func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) {
	var (
		readers         []io.Reader
		existingSegment *Object
		segmentSize     int
	)
	segmentName := getSegment(file.prefix, writeSegmentIdx+1)
	sizeToRead := int(file.chunkSize)
	if writeSegmentIdx < len(file.segments) {
		existingSegment = &file.segments[writeSegmentIdx]
		if writeSegmentIdx != len(file.segments)-1 {
			sizeToRead = int(existingSegment.Bytes)
		}
		if relativeFilePos > 0 {
			headers := make(Headers)
			headers["Range"] = "bytes=0-" + strconv.FormatInt(int64(relativeFilePos-1), 10)
			existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
			if err != nil {
				return nil, 0, err
			}
			defer existingSegmentReader.Close()
			sizeToRead -= relativeFilePos
			segmentSize += relativeFilePos
			readers = []io.Reader{existingSegmentReader}
		}
	}
	if sizeToRead > len(buf) {
		sizeToRead = len(buf)
	}
	segmentSize += sizeToRead
	readers = append(readers, bytes.NewReader(buf[:sizeToRead]))
	if existingSegment != nil && segmentSize < int(existingSegment.Bytes) {
		headers := make(Headers)
		headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-"
		tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
		if err != nil {
			return nil, 0, err
		}
		defer tailSegmentReader.Close()
		segmentSize = int(existingSegment.Bytes)
		readers = append(readers, tailSegmentReader)
	}
	segmentReader := io.MultiReader(readers...)
	headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil)
	if err != nil {
		return nil, 0, err
	}
	return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil
}
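The core of Write above is bookkeeping: given the byte lengths of the segments already uploaded, find which segment the current file position falls into and the offset within it, then rewrite from that point onwards. The standalone sketch below reproduces that lookup for reference; locateSegment is not part of the removed driver or of ncw/swift, and it omits the minChunkSize special case for the final segment.

package main

import "fmt"

// locateSegment mirrors the loop at the top of largeObjectCreateFile.Write:
// it returns the index of the segment containing filePos and the offset of
// filePos relative to the start of that segment. segmentSizes are assumed to
// be the byte lengths of the already-written segments.
func locateSegment(segmentSizes []int64, filePos int64) (idx int, relative int64) {
    var covered int64
    for i, size := range segmentSizes {
        if filePos < covered+size {
            return i, filePos - covered
        }
        covered += size
        idx = i + 1
    }
    // filePos is past the last segment: the write starts a new segment at offset 0.
    return idx, 0
}

func main() {
    sizes := []int64{5 << 20, 5 << 20, 3 << 20} // three segments: 5 MiB, 5 MiB, 3 MiB
    idx, rel := locateSegment(sizes, 11<<20)    // a position 11 MiB into the object
    fmt.Printf("write lands in segment %d at offset %d bytes\n", idx, rel)
}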
func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile {
	if !opts.NoBuffer {
		return &bufferedLargeObjectFile{
			LargeObjectFile: lo,
			bw:              bufio.NewWriterSize(lo, int(opts.ChunkSize)),
		}
	}
	return lo
}

type bufferedLargeObjectFile struct {
	LargeObjectFile
	bw *bufio.Writer
}

func (blo *bufferedLargeObjectFile) Close() error {
	err := blo.bw.Flush()
	if err != nil {
		return err
	}
	return blo.LargeObjectFile.Close()
}

func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) {
	return blo.bw.Write(p)
}

func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) {
	err := blo.bw.Flush()
	if err != nil {
		return 0, err
	}
	return blo.LargeObjectFile.Seek(offset, whence)
}

func (blo *bufferedLargeObjectFile) Size() int64 {
	return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered())
}

func (blo *bufferedLargeObjectFile) Flush() error {
	err := blo.bw.Flush()
	if err != nil {
		return err
	}
	return blo.LargeObjectFile.Flush()
}
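withBuffer and bufferedLargeObjectFile exist so that many small Writes are coalesced into chunk-sized segment uploads rather than hitting the object store one write at a time. The miniature sketch below shows the same effect using only the standard library; the bytes.Buffer stands in for the segment-uploading LargeObjectFile and is not part of the removed driver.

package main

import (
    "bufio"
    "bytes"
    "fmt"
)

func main() {
    var backend bytes.Buffer // stands in for the segment-uploading writer

    const chunkSize = 16
    bw := bufio.NewWriterSize(&backend, chunkSize)

    // Small writes accumulate in the buffer instead of reaching the backend.
    bw.WriteString("hello ")
    bw.WriteString("swift")
    fmt.Println("backend:", backend.Len(), "buffered:", bw.Buffered()) // backend: 0 buffered: 11

    // Flush pushes the buffered bytes through, as Close/Seek/Flush do above.
    bw.Flush()
    fmt.Println("backend:", backend.Len(), "buffered:", bw.Buffered()) // backend: 11 buffered: 0
}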
174 vendor/github.com/ncw/swift/meta.go generated vendored
@@ -1,174 +0,0 @@
// Metadata manipulation in and out of Headers

package swift

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// Metadata stores account, container or object metadata.
type Metadata map[string]string

// Metadata gets the Metadata starting with the metaPrefix out of the Headers.
//
// The keys in the Metadata will be converted to lower case
func (h Headers) Metadata(metaPrefix string) Metadata {
	m := Metadata{}
	metaPrefix = http.CanonicalHeaderKey(metaPrefix)
	for key, value := range h {
		if strings.HasPrefix(key, metaPrefix) {
			metaKey := strings.ToLower(key[len(metaPrefix):])
			m[metaKey] = value
		}
	}
	return m
}

// AccountMetadata converts Headers from account to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) AccountMetadata() Metadata {
	return h.Metadata("X-Account-Meta-")
}

// ContainerMetadata converts Headers from container to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) ContainerMetadata() Metadata {
	return h.Metadata("X-Container-Meta-")
}

// ObjectMetadata converts Headers from object to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) ObjectMetadata() Metadata {
	return h.Metadata("X-Object-Meta-")
}

// Headers convert the Metadata starting with the metaPrefix into a
// Headers.
//
// The keys in the Metadata will be converted from lower case to http
// Canonical (see http.CanonicalHeaderKey).
func (m Metadata) Headers(metaPrefix string) Headers {
	h := Headers{}
	for key, value := range m {
		key = http.CanonicalHeaderKey(metaPrefix + key)
		h[key] = value
	}
	return h
}

// AccountHeaders converts the Metadata for the account.
func (m Metadata) AccountHeaders() Headers {
	return m.Headers("X-Account-Meta-")
}

// ContainerHeaders converts the Metadata for the container.
func (m Metadata) ContainerHeaders() Headers {
	return m.Headers("X-Container-Meta-")
}

// ObjectHeaders converts the Metadata for the object.
func (m Metadata) ObjectHeaders() Headers {
	return m.Headers("X-Object-Meta-")
}

// Turns a number of ns into a floating point string in seconds
//
// Trims trailing zeros and guaranteed to be perfectly accurate
func nsToFloatString(ns int64) string {
	if ns < 0 {
		return "-" + nsToFloatString(-ns)
	}
	result := fmt.Sprintf("%010d", ns)
	split := len(result) - 9
	result, decimals := result[:split], result[split:]
	decimals = strings.TrimRight(decimals, "0")
	if decimals != "" {
		result += "."
		result += decimals
	}
	return result
}

// Turns a floating point string in seconds into a ns integer
//
// Guaranteed to be perfectly accurate
func floatStringToNs(s string) (int64, error) {
	const zeros = "000000000"
	if point := strings.IndexRune(s, '.'); point >= 0 {
		tail := s[point+1:]
		if fill := 9 - len(tail); fill < 0 {
			tail = tail[:9]
		} else {
			tail += zeros[:fill]
		}
		s = s[:point] + tail
	} else if len(s) > 0 { // Make sure empty string produces an error
		s += zeros
	}
	return strconv.ParseInt(s, 10, 64)
}

// FloatStringToTime converts a floating point number string to a time.Time
//
// The string is floating point number of seconds since the epoch
// (Unix time). The number should be in fixed point format (not
// exponential), eg "1354040105.123456789" which represents the time
// "2012-11-27T18:15:05.123456789Z"
//
// Some care is taken to preserve all the accuracy in the time.Time
// (which wouldn't happen with a naive conversion through float64) so
// a round trip conversion won't change the data.
//
// If an error is returned then time will be returned as the zero time.
func FloatStringToTime(s string) (t time.Time, err error) {
	ns, err := floatStringToNs(s)
	if err != nil {
		return
	}
	t = time.Unix(0, ns)
	return
}

// TimeToFloatString converts a time.Time object to a floating point string
//
// The string is floating point number of seconds since the epoch
// (Unix time). The number is in fixed point format (not
// exponential), eg "1354040105.123456789" which represents the time
// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped
// from the output.
//
// Some care is taken to preserve all the accuracy in the time.Time
// (which wouldn't happen with a naive conversion through float64) so
// a round trip conversion won't change the data.
func TimeToFloatString(t time.Time) string {
	return nsToFloatString(t.UnixNano())
}

// GetModTime reads a modification time (mtime) from a Metadata object
//
// This is a defacto standard (used in the official python-swiftclient
// amongst others) for storing the modification time (as read using
// os.Stat) for an object. It is stored using the key 'mtime', which
// for example when written to an object will be 'X-Object-Meta-Mtime'.
//
// If an error is returned then time will be returned as the zero time.
func (m Metadata) GetModTime() (t time.Time, err error) {
	return FloatStringToTime(m["mtime"])
}

// SetModTime writes an modification time (mtime) to a Metadata object
//
// This is a defacto standard (used in the official python-swiftclient
// amongst others) for storing the modification time (as read using
// os.Stat) for an object. It is stored using the key 'mtime', which
// for example when written to an object will be 'X-Object-Meta-Mtime'.
func (m Metadata) SetModTime(t time.Time) {
	m["mtime"] = TimeToFloatString(t)
}
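The metadata helpers above are the public face of this file. A short usage sketch, assuming the pre-removal dependency github.com/ncw/swift v1.0.47 is still importable, shows the round trip they are designed for: attach an mtime to object metadata, turn it into headers for upload, and read it back. All of the identifiers used here (Metadata, Headers, SetModTime, ObjectHeaders, ObjectMetadata, GetModTime) appear in the removed file itself.

package main

import (
    "fmt"
    "time"

    "github.com/ncw/swift"
)

func main() {
    // Store a modification time the way the removed driver (and python-swiftclient) does.
    meta := swift.Metadata{}
    meta.SetModTime(time.Date(2012, 11, 27, 18, 15, 5, 123456789, time.UTC))

    // Convert to headers suitable for an object PUT: the key becomes X-Object-Meta-Mtime.
    headers := meta.ObjectHeaders()
    fmt.Println(headers["X-Object-Meta-Mtime"]) // "1354040105.123456789"

    // Convert headers received from Swift back into metadata and recover the time.
    got := headers.ObjectMetadata()
    t, err := got.GetModTime()
    fmt.Println(t.UTC(), err) // 2012-11-27 18:15:05.123456789 +0000 UTC <nil>
}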
55 vendor/github.com/ncw/swift/notes.txt generated vendored
@@ -1,55 +0,0 @@
Notes on Go Swift
=================

Make a builder style interface like the Google Go APIs? Advantages
are that it is easy to add named methods to the service object to do
specific things. Slightly less efficient. Not sure about how to
return extra stuff though - in an object?

Make a container struct so these could be methods on it?

Make noResponse check for 204?

Make storage public so it can be extended easily?

Rename to go-swift to match user agent string?

Reconnect on auth error - 401 when token expires isn't tested

Make more api compatible with python cloudfiles?

Retry operations on timeout / network errors?
- also 408 error
- GET requests only?

Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock

Add extra headers field to Connection (for via etc)

Make errors use an error heirachy then can catch them with a type assertion

	Error(...)
	ObjectCorrupted{ Error }

Make a Debug flag in connection for logging stuff

Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc

Object range

Object create, update with X-Delete-At or X-Delete-After

Large object support
- check uploads are less than 5GB in normal mode?

Access control CORS?

Swift client retries and backs off for all types of errors

Implement net error interface?

type Error interface {
	error
	Timeout() bool   // Is the error a timeout?
	Temporary() bool // Is the error temporary?
}
171 vendor/github.com/ncw/swift/slo.go generated vendored
@@ -1,171 +0,0 @@
package swift

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/url"
	"os"
)

// StaticLargeObjectCreateFile represents an open static large object
type StaticLargeObjectCreateFile struct {
	largeObjectCreateFile
}

var SLONotSupported = errors.New("SLO not supported")

type swiftSegment struct {
	Path string `json:"path,omitempty"`
	Etag string `json:"etag,omitempty"`
	Size int64  `json:"size_bytes,omitempty"`
	// When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes`
	// but when querying the JSON content of a manifest with the `multipart-manifest=get`
	// parameter, Swift names those attributes `name`, `hash` and `bytes`.
	// We use all the different attributes names in this structure to be able to use
	// the same structure for both uploading and retrieving.
	Name         string `json:"name,omitempty"`
	Hash         string `json:"hash,omitempty"`
	Bytes        int64  `json:"bytes,omitempty"`
	ContentType  string `json:"content_type,omitempty"`
	LastModified string `json:"last_modified,omitempty"`
}

// StaticLargeObjectCreateFile creates a static large object returning
// an object which satisfies io.Writer, io.Seeker, io.Closer and
// io.ReaderFrom. The flags are as passed to the largeObjectCreate
// method.
func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
	info, err := c.cachedQueryInfo()
	if err != nil || !info.SupportsSLO() {
		return nil, SLONotSupported
	}
	realMinChunkSize := info.SLOMinSegmentSize()
	if realMinChunkSize > opts.MinChunkSize {
		opts.MinChunkSize = realMinChunkSize
	}
	lo, err := c.largeObjectCreate(opts)
	if err != nil {
		return nil, err
	}
	return withBuffer(opts, &StaticLargeObjectCreateFile{
		largeObjectCreateFile: *lo,
	}), nil
}

// StaticLargeObjectCreate creates or truncates an existing static
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling StaticLargeObjectCreateFile
func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
	opts.Flags = os.O_TRUNC | os.O_CREATE
	return c.StaticLargeObjectCreateFile(opts)
}

// StaticLargeObjectDelete deletes a static large object and all of its segments.
func (c *Connection) StaticLargeObjectDelete(container string, path string) error {
	info, err := c.cachedQueryInfo()
	if err != nil || !info.SupportsSLO() {
		return SLONotSupported
	}
	return c.LargeObjectDelete(container, path)
}

// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName
func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
	swiftInfo, err := c.cachedQueryInfo()
	if err != nil || !swiftInfo.SupportsSLO() {
		return SLONotSupported
	}
	info, headers, err := c.Object(srcContainer, srcObjectName)
	if err != nil {
		return err
	}

	container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers)
	if err != nil {
		return err
	}

	//copy only metadata during move (other headers might not be safe for copying)
	headers = headers.ObjectMetadata().ObjectHeaders()

	if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil {
		return err
	}

	if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
		return err
	}

	return nil
}

// createSLOManifest creates a static large object manifest
func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error {
	sloSegments := make([]swiftSegment, len(segments))
	for i, segment := range segments {
		sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name)
		sloSegments[i].Etag = segment.Hash
		sloSegments[i].Size = segment.Bytes
	}

	content, err := json.Marshal(sloSegments)
	if err != nil {
		return err
	}

	values := url.Values{}
	values.Set("multipart-manifest", "put")
	if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil {
		return err
	}

	return nil
}

func (file *StaticLargeObjectCreateFile) Close() error {
	return file.Flush()
}

func (file *StaticLargeObjectCreateFile) Flush() error {
	if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil {
		return err
	}
	return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
}

func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) {
	var (
		segmentList      []swiftSegment
		segments         []Object
		segPath          string
		segmentContainer string
	)

	values := url.Values{}
	values.Set("multipart-manifest", "get")

	file, _, err := c.objectOpen(container, path, true, nil, values)
	if err != nil {
		return "", nil, err
	}

	content, err := ioutil.ReadAll(file)
	if err != nil {
		return "", nil, err
	}

	json.Unmarshal(content, &segmentList)
	for _, segment := range segmentList {
		segmentContainer, segPath = parseFullPath(segment.Name[1:])
		segments = append(segments, Object{
			Name:  segPath,
			Bytes: segment.Bytes,
			Hash:  segment.Hash,
		})
	}

	return segmentContainer, segments, nil
}
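createSLOManifest above is, at bottom, a JSON PUT with multipart-manifest=put in the query string. The standalone sketch below shows the shape of the manifest body it produces; manifestEntry is a local stand-in for the unexported swiftSegment type using its upload-side field names (path, etag, size_bytes), and the segment names and etags are invented example values.

package main

import (
    "encoding/json"
    "fmt"
)

// manifestEntry mirrors the upload-side fields of the swiftSegment struct above.
type manifestEntry struct {
    Path string `json:"path,omitempty"`
    Etag string `json:"etag,omitempty"`
    Size int64  `json:"size_bytes,omitempty"`
}

func main() {
    // Two hypothetical segments stored in a dedicated segments container.
    entries := []manifestEntry{
        {Path: "mycontainer_segments/segments/001", Etag: "d41d8cd98f00b204e9800998ecf8427e", Size: 5242880},
        {Path: "mycontainer_segments/segments/002", Etag: "9e107d9d372bb6826bd81d3542a419d6", Size: 1048576},
    }
    body, _ := json.MarshalIndent(entries, "", "  ")
    // This body is PUT to the manifest object with ?multipart-manifest=put;
    // Swift then serves the concatenation of the listed segments.
    fmt.Println(string(body))
}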
2243 vendor/github.com/ncw/swift/swift.go generated vendored
File diff suppressed because it is too large
1107 vendor/github.com/ncw/swift/swifttest/server.go generated vendored
File diff suppressed because it is too large
59 vendor/github.com/ncw/swift/timeout_reader.go generated vendored
@@ -1,59 +0,0 @@
package swift

import (
	"io"
	"time"
)

// An io.ReadCloser which obeys an idle timeout
type timeoutReader struct {
	reader  io.ReadCloser
	timeout time.Duration
	cancel  func()
}

// Returns a wrapper around the reader which obeys an idle
// timeout. The cancel function is called if the timeout happens
func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
	return &timeoutReader{
		reader:  reader,
		timeout: timeout,
		cancel:  cancel,
	}
}

// Read reads up to len(p) bytes into p
//
// Waits at most for timeout for the read to complete otherwise returns a timeout
func (t *timeoutReader) Read(p []byte) (int, error) {
	// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
	// Do the read in the background
	type result struct {
		n   int
		err error
	}
	done := make(chan result, 1)
	go func() {
		n, err := t.reader.Read(p)
		done <- result{n, err}
	}()
	// Wait for the read or the timeout
	timer := time.NewTimer(t.timeout)
	defer timer.Stop()
	select {
	case r := <-done:
		return r.n, r.err
	case <-timer.C:
		t.cancel()
		return 0, TimeoutError
	}
	panic("unreachable") // for Go 1.0
}

// Close the channel
func (t *timeoutReader) Close() error {
	return t.reader.Close()
}

// Check it satisfies the interface
var _ io.ReadCloser = &timeoutReader{}
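The Read above relies on a common Go pattern: run the blocking read in a goroutine and select on the result against a timer. A minimal, self-contained version of the same pattern follows; TimeoutError and the cancel hook are specific to this package, so the sketch substitutes a plain error value.

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"
    "time"
)

var errTimeout = errors.New("read timed out")

// readWithTimeout performs a single Read but gives up after timeout,
// mirroring the structure of timeoutReader.Read.
func readWithTimeout(r io.Reader, p []byte, timeout time.Duration) (int, error) {
    type result struct {
        n   int
        err error
    }
    done := make(chan result, 1) // buffered so the goroutine never blocks forever
    go func() {
        n, err := r.Read(p)
        done <- result{n, err}
    }()
    timer := time.NewTimer(timeout)
    defer timer.Stop()
    select {
    case res := <-done:
        return res.n, res.err
    case <-timer.C:
        return 0, errTimeout
    }
}

func main() {
    buf := make([]byte, 8)
    n, err := readWithTimeout(strings.NewReader("swiftly"), buf, time.Second)
    fmt.Println(n, err, string(buf[:n])) // 7 <nil> swiftly
}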
22 vendor/github.com/ncw/swift/travis_realserver.sh generated vendored
@@ -1,22 +0,0 @@
#!/bin/bash
set -e

if [ "${TRAVIS_PULL_REQUEST}" = "true" ]; then
	exit 0
fi

if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then
	echo "Running tests pointing to Rackspace"
	export SWIFT_API_KEY=$RACKSPACE_APIKEY
	export SWIFT_API_USER=$RACKSPACE_USER
	export SWIFT_AUTH_URL=$RACKSPACE_AUTH
	go test ./...
fi

if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! -z "${MEMSET_APIKEY}" ]; then
	echo "Running tests pointing to Memset"
	export SWIFT_API_KEY=$MEMSET_APIKEY
	export SWIFT_API_USER=$MEMSET_USER
	export SWIFT_AUTH_URL=$MEMSET_AUTH
	go test
fi
55 vendor/github.com/ncw/swift/watchdog_reader.go generated vendored
@@ -1,55 +0,0 @@
package swift

import (
	"io"
	"time"
)

var watchdogChunkSize = 1 << 20 // 1 MiB

// An io.Reader which resets a watchdog timer whenever data is read
type watchdogReader struct {
	timeout   time.Duration
	reader    io.Reader
	timer     *time.Timer
	chunkSize int
}

// Returns a new reader which will kick the watchdog timer whenever data is read
func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader {
	return &watchdogReader{
		timeout:   timeout,
		reader:    reader,
		timer:     timer,
		chunkSize: watchdogChunkSize,
	}
}

// Read reads up to len(p) bytes into p
func (t *watchdogReader) Read(p []byte) (int, error) {
	//read from underlying reader in chunks not larger than t.chunkSize
	//while resetting the watchdog timer before every read; the small chunk
	//size ensures that the timer does not fire when reading a large amount of
	//data from a slow connection
	start := 0
	end := len(p)
	for start < end {
		length := end - start
		if length > t.chunkSize {
			length = t.chunkSize
		}

		resetTimer(t.timer, t.timeout)
		n, err := t.reader.Read(p[start : start+length])
		start += n
		if n == 0 || err != nil {
			return start, err
		}
	}

	resetTimer(t.timer, t.timeout)
	return start, nil
}

// Check it satisfies the interface
var _ io.Reader = &watchdogReader{}
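resetTimer, used above, is defined elsewhere in the package and is not part of this diff. The conventional idiom it corresponds to is stop the timer, drain its channel if it already fired, then reset it; the sketch below shows that idiom and is only an assumption about what the real helper does.

package main

import (
    "fmt"
    "time"
)

// resetTimerSketch shows the usual stop/drain/reset idiom; the real resetTimer
// helper in the ncw/swift package lives in another file and may differ in detail.
func resetTimerSketch(t *time.Timer, d time.Duration) {
    if !t.Stop() {
        // The timer already fired: drain the channel so Reset starts clean.
        select {
        case <-t.C:
        default:
        }
    }
    t.Reset(d)
}

func main() {
    t := time.NewTimer(50 * time.Millisecond)
    time.Sleep(100 * time.Millisecond) // let it fire once
    resetTimerSketch(t, 50*time.Millisecond)
    <-t.C
    fmt.Println("timer reset and fired again")
}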
4 vendor/modules.txt vendored
@@ -268,10 +268,6 @@ github.com/matttproud/golang_protobuf_extensions/pbutil
 github.com/mitchellh/mapstructure
 # github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f
 ## explicit
-# github.com/ncw/swift v1.0.47
-## explicit
-github.com/ncw/swift
-github.com/ncw/swift/swifttest
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest