Merge pull request #975 from restic/add-swift-backend
Add swift backend
This commit is contained in:
commit 028f43299a

37 changed files with 9833 additions and 2 deletions
@@ -4,6 +4,11 @@ released version of restic from the perspective of the user.

Important Changes in 0.X.Y
==========================

 * New "swift" backend: A new backend for the OpenStack Swift cloud storage
   protocol has been added, https://wiki.openstack.org/wiki/Swift
   https://github.com/restic/restic/pull/975
   https://github.com/restic/restic/pull/648

Important Changes in 0.6.1
==========================

@@ -282,6 +282,67 @@ this command.
Please note that knowledge of your password is required to access
the repository. Losing your password means that your data is irrecoverably lost.

OpenStack Swift
~~~~~~~~~~~~~~~

Restic can back up data to an OpenStack Swift container. Because Swift supports
various authentication methods, credentials are passed through environment
variables. In order to help integration with existing OpenStack installations,
the naming convention of those variables follows the official Python Swift client:

.. code-block:: console

    # For keystone v1 authentication
    $ export ST_AUTH=<MY_AUTH_URL>
    $ export ST_USER=<MY_USER_NAME>
    $ export ST_KEY=<MY_USER_PASSWORD>

    # For keystone v2 authentication (some variables are optional)
    $ export OS_AUTH_URL=<MY_AUTH_URL>
    $ export OS_REGION_NAME=<MY_REGION_NAME>
    $ export OS_USERNAME=<MY_USERNAME>
    $ export OS_PASSWORD=<MY_PASSWORD>
    $ export OS_TENANT_ID=<MY_TENANT_ID>
    $ export OS_TENANT_NAME=<MY_TENANT_NAME>

    # For keystone v3 authentication (some variables are optional)
    $ export OS_AUTH_URL=<MY_AUTH_URL>
    $ export OS_REGION_NAME=<MY_REGION_NAME>
    $ export OS_USERNAME=<MY_USERNAME>
    $ export OS_PASSWORD=<MY_PASSWORD>
    $ export OS_USER_DOMAIN_NAME=<MY_DOMAIN_NAME>
    $ export OS_PROJECT_NAME=<MY_PROJECT_NAME>
    $ export OS_PROJECT_DOMAIN_NAME=<MY_PROJECT_DOMAIN_NAME>

    # For authentication based on tokens
    $ export OS_STORAGE_URL=<MY_STORAGE_URL>
    $ export OS_AUTH_TOKEN=<MY_AUTH_TOKEN>


Restic should be compatible with an `OpenStack RC file
<https://docs.openstack.org/user-guide/common/cli-set-environment-variables-using-openstack-rc.html>`__
in most cases.

Once the environment variables are set up, a new repository can be created. The
name of the Swift container and an optional path can be specified. If
the container does not exist, it will be created automatically:

.. code-block:: console

    $ restic -r swift:container_name:/path init   # path is optional
    enter password for new backend:
    enter password again:
    created restic backend eefee03bbd at swift:container_name:/path
    Please note that knowledge of your password is required to access the repository.
    Losing your password means that your data is irrecoverably lost.

The storage policy of the container created by restic can be set using this
environment variable:

.. code-block:: console

    $ export SWIFT_DEFAULT_CONTAINER_POLICY=<MY_CONTAINER_POLICY>


Password prompt on Windows
~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -164,6 +164,13 @@ func (env *TravisEnvironment) RunTests() error {
		msg("S3 repository not available\n")
	}

	// if the test swift service is available, make sure that the test is not skipped
	if os.Getenv("RESTIC_TEST_SWIFT") != "" {
		ensureTests = append(ensureTests, "restic/backend/swift.TestBackendSwift")
	} else {
		msg("Swift service not available\n")
	}

	env.env["RESTIC_TEST_DISALLOW_SKIP"] = strings.Join(ensureTests, ",")

	if *runCrossCompile {

@@ -16,6 +16,7 @@ import (
	"restic/backend/rest"
	"restic/backend/s3"
	"restic/backend/sftp"
	"restic/backend/swift"
	"restic/debug"
	"restic/options"
	"restic/repository"
@@ -356,6 +357,20 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro
		debug.Log("opening s3 repository at %#v", cfg)
		return cfg, nil

	case "swift":
		cfg := loc.Config.(swift.Config)

		if err := swift.ApplyEnvironment("", &cfg); err != nil {
			return nil, err
		}

		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
			return nil, err
		}

		debug.Log("opening swift repository at %#v", cfg)
		return cfg, nil

	case "rest":
		cfg := loc.Config.(rest.Config)
		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
@@ -391,6 +406,8 @@ func open(s string, opts options.Options) (restic.Backend, error) {
		be, err = sftp.Open(cfg.(sftp.Config))
	case "s3":
		be, err = s3.Open(cfg.(s3.Config))
	case "swift":
		be, err = swift.Open(cfg.(swift.Config))
	case "rest":
		be, err = rest.Open(cfg.(rest.Config))

@@ -435,6 +452,8 @@ func create(s string, opts options.Options) (restic.Backend, error) {
		return sftp.Create(cfg.(sftp.Config))
	case "s3":
		return s3.Open(cfg.(s3.Config))
	case "swift":
		return swift.Open(cfg.(swift.Config))
	case "rest":
		return rest.Create(cfg.(rest.Config))
	}

@@ -8,6 +8,7 @@ import (
	"restic/backend/rest"
	"restic/backend/s3"
	"restic/backend/sftp"
	"restic/backend/swift"
)

// Location specifies the location of a repository, including the method of
@@ -28,6 +29,7 @@ var parsers = []parser{
	{"local", local.ParseConfig},
	{"sftp", sftp.ParseConfig},
	{"s3", s3.ParseConfig},
	{"swift", swift.ParseConfig},
	{"rest", rest.ParseConfig},
}

@@ -9,6 +9,7 @@ import (
	"restic/backend/rest"
	"restic/backend/s3"
	"restic/backend/sftp"
	"restic/backend/swift"
)

func parseURL(s string) *url.URL {
@@ -195,6 +196,24 @@ var parseTests = []struct {
			},
		},
	},
	{
		"swift:container17:/",
		Location{Scheme: "swift",
			Config: swift.Config{
				Container: "container17",
				Prefix:    "",
			},
		},
	},
	{
		"swift:container17:/prefix97",
		Location{Scheme: "swift",
			Config: swift.Config{
				Container: "container17",
				Prefix:    "prefix97",
			},
		},
	},
	{
		"rest:http://hostname.foo:1234/",
		Location{Scheme: "rest",
|
96
src/restic/backend/swift/config.go
Normal file
|
@@ -0,0 +1,96 @@
package swift

import (
	"os"
	"restic/errors"
	"strings"
)

// Config contains the basic configuration needed to specify a swift location
// for a swift server.
type Config struct {
	UserName     string
	Domain       string
	APIKey       string
	AuthURL      string
	Region       string
	Tenant       string
	TenantID     string
	TenantDomain string
	TrustID      string

	StorageURL string
	AuthToken  string

	Container              string
	Prefix                 string
	DefaultContainerPolicy string
}

// ParseConfig parses the string s and extracts the swift container name and prefix.
func ParseConfig(s string) (interface{}, error) {
	data := strings.SplitN(s, ":", 3)
	if len(data) != 3 {
		return nil, errors.New("invalid URL, expected: swift:container-name:/[prefix]")
	}

	scheme, container, prefix := data[0], data[1], data[2]
	if scheme != "swift" {
		return nil, errors.Errorf("unexpected prefix: %s", data[0])
	}

	if len(prefix) == 0 {
		return nil, errors.Errorf("prefix is empty")
	}

	if prefix[0] != '/' {
		return nil, errors.Errorf("prefix does not start with slash (/)")
	}
	prefix = prefix[1:]

	cfg := Config{
		Container: container,
		Prefix:    prefix,
	}

	return cfg, nil
}

// ApplyEnvironment saves values from the environment to the config.
func ApplyEnvironment(prefix string, cfg interface{}) error {
	c := cfg.(*Config)
	for _, val := range []struct {
		s   *string
		env string
	}{
		// v2/v3 specific
		{&c.UserName, prefix + "OS_USERNAME"},
		{&c.APIKey, prefix + "OS_PASSWORD"},
		{&c.Region, prefix + "OS_REGION_NAME"},
		{&c.AuthURL, prefix + "OS_AUTH_URL"},

		// v3 specific
		{&c.Domain, prefix + "OS_USER_DOMAIN_NAME"},
		{&c.Tenant, prefix + "OS_PROJECT_NAME"},
		{&c.TenantDomain, prefix + "OS_PROJECT_DOMAIN_NAME"},

		// v2 specific
		{&c.TenantID, prefix + "OS_TENANT_ID"},
		{&c.Tenant, prefix + "OS_TENANT_NAME"},

		// v1 specific
		{&c.AuthURL, prefix + "ST_AUTH"},
		{&c.UserName, prefix + "ST_USER"},
		{&c.APIKey, prefix + "ST_KEY"},

		// Manual authentication
		{&c.StorageURL, prefix + "OS_STORAGE_URL"},
		{&c.AuthToken, prefix + "OS_AUTH_TOKEN"},

		{&c.DefaultContainerPolicy, prefix + "SWIFT_DEFAULT_CONTAINER_POLICY"},
	} {
		if *val.s == "" {
			*val.s = os.Getenv(val.env)
		}
	}
	return nil
}
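ParseConfig and ApplyEnvironment are the two entry points that cmds/restic/global.go (see the hunks earlier in this diff) calls for a swift: repository. A minimal, hypothetical sketch of that flow outside restic, assuming this tree's GOPATH-style import path and a placeholder repository string:

	package main

	import (
		"fmt"
		"log"

		"restic/backend/swift"
	)

	func main() {
		// Parse the repository string into a swift.Config holding container and prefix.
		v, err := swift.ParseConfig("swift:container17:/restic") // hypothetical repository string
		if err != nil {
			log.Fatal(err)
		}
		cfg := v.(swift.Config)

		// Fill in any credentials (OS_USERNAME, OS_PASSWORD, ST_AUTH, ...) that are
		// still empty from the corresponding environment variables.
		if err := swift.ApplyEnvironment("", &cfg); err != nil {
			log.Fatal(err)
		}

		fmt.Printf("container=%q prefix=%q auth URL=%q\n", cfg.Container, cfg.Prefix, cfg.AuthURL)
	}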
|
53
src/restic/backend/swift/config_test.go
Normal file
|
@@ -0,0 +1,53 @@
|
|||
package swift
|
||||
|
||||
import "testing"
|
||||
|
||||
var configTests = []struct {
|
||||
s string
|
||||
cfg Config
|
||||
}{
|
||||
{"swift:cnt1:/", Config{Container: "cnt1", Prefix: ""}},
|
||||
{"swift:cnt2:/prefix", Config{Container: "cnt2", Prefix: "prefix"}},
|
||||
{"swift:cnt3:/prefix/longer", Config{Container: "cnt3", Prefix: "prefix/longer"}},
|
||||
}
|
||||
|
||||
func TestParseConfig(t *testing.T) {
|
||||
for _, test := range configTests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
v, err := ParseConfig(test.s)
|
||||
if err != nil {
|
||||
t.Fatalf("parsing %q failed: %v", test.s, err)
|
||||
}
|
||||
|
||||
cfg, ok := v.(Config)
|
||||
if !ok {
|
||||
t.Fatalf("wrong type returned, want Config, got %T", cfg)
|
||||
}
|
||||
|
||||
if cfg != test.cfg {
|
||||
t.Fatalf("wrong output for %q, want:\n %#v\ngot:\n %#v",
|
||||
test.s, test.cfg, cfg)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var configTestsInvalid = []string{
|
||||
"swift://hostname/container",
|
||||
"swift:////",
|
||||
"swift://",
|
||||
"swift:////prefix",
|
||||
"swift:container",
|
||||
"swift:container:",
|
||||
"swift:container/prefix",
|
||||
}
|
||||
|
||||
func TestParseConfigInvalid(t *testing.T) {
|
||||
for i, test := range configTestsInvalid {
|
||||
_, err := ParseConfig(test)
|
||||
if err == nil {
|
||||
t.Errorf("test %d: invalid config %s did not return an error", i, test)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
331
src/restic/backend/swift/swift.go
Normal file
|
@@ -0,0 +1,331 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"restic"
|
||||
"restic/backend"
|
||||
"restic/debug"
|
||||
"restic/errors"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
|
||||
const connLimit = 10
|
||||
|
||||
// beSwift is a backend which stores the data on a swift endpoint.
|
||||
type beSwift struct {
|
||||
conn *swift.Connection
|
||||
connChan chan struct{}
|
||||
container string // Container name
|
||||
prefix string // Prefix of object names in the container
|
||||
backend.Layout
|
||||
}
|
||||
|
||||
// Open opens the swift backend at a container in region. The container is
|
||||
// created if it does not exist yet.
|
||||
func Open(cfg Config) (restic.Backend, error) {
|
||||
debug.Log("config %#v", cfg)
|
||||
|
||||
be := &beSwift{
|
||||
conn: &swift.Connection{
|
||||
UserName: cfg.UserName,
|
||||
Domain: cfg.Domain,
|
||||
ApiKey: cfg.APIKey,
|
||||
AuthUrl: cfg.AuthURL,
|
||||
Region: cfg.Region,
|
||||
Tenant: cfg.Tenant,
|
||||
TenantId: cfg.TenantID,
|
||||
TenantDomain: cfg.TenantDomain,
|
||||
TrustId: cfg.TrustID,
|
||||
StorageUrl: cfg.StorageURL,
|
||||
AuthToken: cfg.AuthToken,
|
||||
ConnectTimeout: time.Minute,
|
||||
Timeout: time.Minute,
|
||||
|
||||
Transport: backend.Transport(),
|
||||
},
|
||||
container: cfg.Container,
|
||||
prefix: cfg.Prefix,
|
||||
Layout: &backend.DefaultLayout{
|
||||
Path: cfg.Prefix,
|
||||
Join: path.Join,
|
||||
},
|
||||
}
|
||||
be.createConnections()
|
||||
|
||||
// Authenticate if needed
|
||||
if !be.conn.Authenticated() {
|
||||
if err := be.conn.Authenticate(); err != nil {
|
||||
return nil, errors.Wrap(err, "conn.Authenticate")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure container exists
|
||||
switch _, _, err := be.conn.Container(be.container); err {
|
||||
case nil:
|
||||
// Container exists
|
||||
|
||||
case swift.ContainerNotFound:
|
||||
err = be.createContainer(cfg.DefaultContainerPolicy)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "beSwift.createContainer")
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, errors.Wrap(err, "conn.Container")
|
||||
}
|
||||
|
||||
// check that the server supports byte ranges
|
||||
_, hdr, err := be.conn.Account()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Account()")
|
||||
}
|
||||
|
||||
if hdr["Accept-Ranges"] != "bytes" {
|
||||
return nil, errors.New("backend does not support byte range")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
}
|
||||
|
||||
func (be *beSwift) createConnections() {
|
||||
be.connChan = make(chan struct{}, connLimit)
|
||||
for i := 0; i < connLimit; i++ {
|
||||
be.connChan <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (be *beSwift) createContainer(policy string) error {
|
||||
var h swift.Headers
|
||||
if policy != "" {
|
||||
h = swift.Headers{
|
||||
"X-Storage-Policy": policy,
|
||||
}
|
||||
}
|
||||
|
||||
return be.conn.ContainerCreate(be.container, h)
|
||||
}
|
||||
|
||||
// Location returns this backend's location (the container name).
|
||||
func (be *beSwift) Location() string {
|
||||
return be.container
|
||||
}
|
||||
|
||||
// Load returns a reader that yields the contents of the file at h at the
|
||||
// given offset. If length is nonzero, only a portion of the file is
|
||||
// returned. rd must be closed after use.
|
||||
func (be *beSwift) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
|
||||
debug.Log("Load %v, length %v, offset %v", h, length, offset)
|
||||
if err := h.Valid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if offset < 0 {
|
||||
return nil, errors.New("offset is negative")
|
||||
}
|
||||
|
||||
if length < 0 {
|
||||
return nil, errors.Errorf("invalid length %d", length)
|
||||
}
|
||||
|
||||
objName := be.Filename(h)
|
||||
|
||||
<-be.connChan
|
||||
defer func() {
|
||||
be.connChan <- struct{}{}
|
||||
}()
|
||||
|
||||
headers := swift.Headers{}
|
||||
if offset > 0 {
|
||||
headers["Range"] = fmt.Sprintf("bytes=%d-", offset)
|
||||
}
|
||||
|
||||
if length > 0 {
|
||||
headers["Range"] = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
|
||||
}
|
||||
|
||||
if _, ok := headers["Range"]; ok {
|
||||
debug.Log("Load(%v) send range %v", h, headers["Range"])
|
||||
}
|
||||
|
||||
obj, _, err := be.conn.ObjectOpen(be.container, objName, false, headers)
|
||||
if err != nil {
|
||||
debug.Log(" err %v", err)
|
||||
return nil, errors.Wrap(err, "conn.ObjectOpen")
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// Save stores data in the backend at the handle.
|
||||
func (be *beSwift) Save(h restic.Handle, rd io.Reader) (err error) {
|
||||
if err = h.Valid(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objName := be.Filename(h)
|
||||
|
||||
debug.Log("Save %v at %v", h, objName)
|
||||
|
||||
// Check key does not already exist
|
||||
switch _, _, err = be.conn.Object(be.container, objName); err {
|
||||
case nil:
|
||||
debug.Log("%v already exists", h)
|
||||
return errors.New("key already exists")
|
||||
|
||||
case swift.ObjectNotFound:
|
||||
// Ok, that's what we want
|
||||
|
||||
default:
|
||||
return errors.Wrap(err, "conn.Object")
|
||||
}
|
||||
|
||||
<-be.connChan
|
||||
defer func() {
|
||||
be.connChan <- struct{}{}
|
||||
}()
|
||||
|
||||
encoding := "binary/octet-stream"
|
||||
|
||||
debug.Log("PutObject(%v, %v, %v)", be.container, objName, encoding)
|
||||
_, err = be.conn.ObjectPut(be.container, objName, rd, true, "", encoding, nil)
|
||||
debug.Log("%v, err %#v", objName, err)
|
||||
|
||||
return errors.Wrap(err, "client.PutObject")
|
||||
}
|
||||
|
||||
// Stat returns information about a blob.
|
||||
func (be *beSwift) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
|
||||
debug.Log("%v", h)
|
||||
|
||||
objName := be.Filename(h)
|
||||
|
||||
obj, _, err := be.conn.Object(be.container, objName)
|
||||
if err != nil {
|
||||
debug.Log("Object() err %v", err)
|
||||
return restic.FileInfo{}, errors.Wrap(err, "conn.Object")
|
||||
}
|
||||
|
||||
return restic.FileInfo{Size: obj.Bytes}, nil
|
||||
}
|
||||
|
||||
// Test returns true if a blob of the given type and name exists in the backend.
|
||||
func (be *beSwift) Test(h restic.Handle) (bool, error) {
|
||||
objName := be.Filename(h)
|
||||
switch _, _, err := be.conn.Object(be.container, objName); err {
|
||||
case nil:
|
||||
return true, nil
|
||||
|
||||
case swift.ObjectNotFound:
|
||||
return false, nil
|
||||
|
||||
default:
|
||||
return false, errors.Wrap(err, "conn.Object")
|
||||
}
|
||||
}
|
||||
|
||||
// Remove removes the blob with the given name and type.
|
||||
func (be *beSwift) Remove(h restic.Handle) error {
|
||||
objName := be.Filename(h)
|
||||
err := be.conn.ObjectDelete(be.container, objName)
|
||||
debug.Log("Remove(%v) -> err %v", h, err)
|
||||
return errors.Wrap(err, "conn.ObjectDelete")
|
||||
}
|
||||
|
||||
// List returns a channel that yields all names of blobs of type t. A
|
||||
// goroutine is started for this. If the channel done is closed, sending
|
||||
// stops.
|
||||
func (be *beSwift) List(t restic.FileType, done <-chan struct{}) <-chan string {
|
||||
debug.Log("listing %v", t)
|
||||
ch := make(chan string)
|
||||
|
||||
prefix := be.Filename(restic.Handle{Type: t}) + "/"
|
||||
|
||||
go func() {
|
||||
defer close(ch)
|
||||
|
||||
err := be.conn.ObjectsWalk(be.container, &swift.ObjectsOpts{Prefix: prefix},
|
||||
func(opts *swift.ObjectsOpts) (interface{}, error) {
|
||||
newObjects, err := be.conn.ObjectNames(be.container, opts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "conn.ObjectNames")
|
||||
}
|
||||
for _, obj := range newObjects {
|
||||
m := filepath.Base(strings.TrimPrefix(obj, prefix))
|
||||
if m == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case ch <- m:
|
||||
case <-done:
|
||||
return nil, io.EOF
|
||||
}
|
||||
}
|
||||
return newObjects, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
debug.Log("ObjectsWalk returned error: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// Remove keys for a specified backend type.
|
||||
func (be *beSwift) removeKeys(t restic.FileType) error {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
for key := range be.List(t, done) {
|
||||
err := be.Remove(restic.Handle{Type: t, Name: key})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsNotExist returns true if the error is caused by a not existing file.
|
||||
func (be *beSwift) IsNotExist(err error) bool {
|
||||
if e, ok := errors.Cause(err).(*swift.Error); ok {
|
||||
return e.StatusCode == http.StatusNotFound
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Delete removes all restic objects in the container.
|
||||
// It will not remove the container itself.
|
||||
func (be *beSwift) Delete() error {
|
||||
alltypes := []restic.FileType{
|
||||
restic.DataFile,
|
||||
restic.KeyFile,
|
||||
restic.LockFile,
|
||||
restic.SnapshotFile,
|
||||
restic.IndexFile}
|
||||
|
||||
for _, t := range alltypes {
|
||||
err := be.removeKeys(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := be.Remove(restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !be.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close does nothing
|
||||
func (be *beSwift) Close() error { return nil }
|
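For orientation, this is how a caller could exercise the new backend directly; restic itself goes through the open()/create() dispatch shown earlier in this diff. The container name, handle name, and payload below are placeholders, so this is a sketch rather than code from the commit:

	package main

	import (
		"bytes"
		"io/ioutil"
		"log"

		"restic"
		"restic/backend/swift"
	)

	func main() {
		cfg := swift.Config{
			Container: "restic-demo", // hypothetical container
			Prefix:    "demo",
		}
		// Pull credentials (OS_* / ST_* variables) from the environment.
		if err := swift.ApplyEnvironment("", &cfg); err != nil {
			log.Fatal(err)
		}

		// Open authenticates and creates the container if it does not exist yet.
		be, err := swift.Open(cfg)
		if err != nil {
			log.Fatal(err)
		}

		h := restic.Handle{Type: restic.DataFile, Name: "0123456789abcdef"} // hypothetical name
		if err := be.Save(h, bytes.NewReader([]byte("payload"))); err != nil {
			log.Fatal(err)
		}

		// Load with length 0 and offset 0 returns the whole object; a nonzero
		// offset/length is translated into an HTTP Range request (see Load above).
		rd, err := be.Load(h, 0, 0)
		if err != nil {
			log.Fatal(err)
		}
		defer rd.Close()
		buf, _ := ioutil.ReadAll(rd)
		log.Printf("read %d bytes back", len(buf))
	}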
107
src/restic/backend/swift/swift_test.go
Normal file
|
@@ -0,0 +1,107 @@
|
|||
package swift_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"restic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"restic/errors"
|
||||
. "restic/test"
|
||||
|
||||
"restic/backend/swift"
|
||||
"restic/backend/test"
|
||||
)
|
||||
|
||||
func newSwiftTestSuite(t testing.TB) *test.Suite {
|
||||
return &test.Suite{
|
||||
// do not use excessive data
|
||||
MinimalData: true,
|
||||
|
||||
// NewConfig returns a config for a new temporary backend that will be used in tests.
|
||||
NewConfig: func() (interface{}, error) {
|
||||
swiftcfg, err := swift.ParseConfig(os.Getenv("RESTIC_TEST_SWIFT"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := swiftcfg.(swift.Config)
|
||||
if err = swift.ApplyEnvironment("RESTIC_TEST_", &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.Prefix += fmt.Sprintf("/test-%d", time.Now().UnixNano())
|
||||
t.Logf("using prefix %v", cfg.Prefix)
|
||||
return cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(config interface{}) (restic.Backend, error) {
|
||||
cfg := config.(swift.Config)
|
||||
|
||||
be, err := swift.Open(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
exists, err := be.Test(restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(config interface{}) (restic.Backend, error) {
|
||||
cfg := config.(swift.Config)
|
||||
return swift.Open(cfg)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(config interface{}) error {
|
||||
cfg := config.(swift.Config)
|
||||
|
||||
be, err := swift.Open(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := be.(restic.Deleter).Delete(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendSwift(t *testing.T) {
|
||||
defer func() {
|
||||
if t.Skipped() {
|
||||
SkipDisallowed(t, "restic/backend/swift.TestBackendSwift")
|
||||
}
|
||||
}()
|
||||
|
||||
if os.Getenv("RESTIC_TEST_SWIFT") == "" {
|
||||
t.Skip("RESTIC_TEST_SWIFT unset, skipping test")
|
||||
return
|
||||
}
|
||||
|
||||
t.Logf("run tests")
|
||||
newSwiftTestSuite(t).RunTests(t)
|
||||
}
|
||||
|
||||
func BenchmarkBackendSwift(t *testing.B) {
|
||||
if os.Getenv("RESTIC_TEST_SWIFT") == "" {
|
||||
t.Skip("RESTIC_TEST_SWIFT unset, skipping test")
|
||||
return
|
||||
}
|
||||
|
||||
t.Logf("run tests")
|
||||
newSwiftTestSuite(t).RunBenchmarks(t)
|
||||
}
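The suite above is driven entirely by environment variables: RESTIC_TEST_SWIFT holds the repository string, and because ApplyEnvironment is called with the "RESTIC_TEST_" prefix, the usual OpenStack credentials are read with that prefix as well. A hypothetical setup, written as Go for illustration (normally these would simply be exported in the shell before running go test; all values are placeholders):

	package main

	import "os"

	func main() {
		// Hypothetical values; adjust to the Swift endpoint actually used for testing.
		os.Setenv("RESTIC_TEST_SWIFT", "swift:restic-test-container:/")
		os.Setenv("RESTIC_TEST_OS_AUTH_URL", "https://keystone.example.com/v3")
		os.Setenv("RESTIC_TEST_OS_USERNAME", "demo")
		os.Setenv("RESTIC_TEST_OS_PASSWORD", "secret")
		os.Setenv("RESTIC_TEST_OS_PROJECT_NAME", "demo")
		os.Setenv("RESTIC_TEST_OS_USER_DOMAIN_NAME", "Default")
	}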
|
|
@@ -434,6 +434,24 @@ func testLoad(b restic.Backend, h restic.Handle, length int, offset int64) error
|
|||
return err
|
||||
}
|
||||
|
||||
func delayedRemove(b restic.Backend, h restic.Handle) error {
|
||||
// Some backends (swift, I'm looking at you) may implement delayed
|
||||
// removal of data. Let's wait a bit if this happens.
|
||||
err := b.Remove(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
found, err := b.Test(h)
|
||||
for i := 0; found && i < 20; i++ {
|
||||
found, err = b.Test(h)
|
||||
if found {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// TestBackend tests all functions of the backend.
|
||||
func (s *Suite) TestBackend(t *testing.T) {
|
||||
b := s.open(t)
|
||||
|
@@ -508,7 +526,7 @@ func (s *Suite) TestBackend(t *testing.T) {
|
|||
test.Assert(t, err != nil, "expected error for %v, got %v", h, err)
|
||||
|
||||
// remove and recreate
|
||||
err = b.Remove(h)
|
||||
err = delayedRemove(b, h)
|
||||
test.OK(t, err)
|
||||
|
||||
// test that the blob is gone
|
||||
|
@@ -558,7 +576,7 @@ func (s *Suite) TestBackend(t *testing.T) {
|
|||
test.OK(t, err)
|
||||
test.Assert(t, found, fmt.Sprintf("id %q not found", id))
|
||||
|
||||
test.OK(t, b.Remove(h))
|
||||
test.OK(t, delayedRemove(b, h))
|
||||
|
||||
found, err = b.Test(h)
|
||||
test.OK(t, err)
|
||||
|
|
6
vendor/manifest
vendored
|
@@ -43,6 +43,12 @@
|
|||
"revision": "85f15b007f08e11a62c769abe65299b812fd2e0d",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/ncw/swift",
|
||||
"repository": "https://github.com/ncw/swift",
|
||||
"revision": "bf51ccd3b5c3a1f12ac762b4511c5f9f1ce6b26f",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/pkg/errors",
|
||||
"repository": "https://github.com/pkg/errors",
|
||||
|
|
20
vendor/src/github.com/ncw/swift/COPYING
vendored
Normal file
|
@@ -0,0 +1,20 @@
|
|||
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
142
vendor/src/github.com/ncw/swift/README.md
vendored
Normal file
|
@@ -0,0 +1,142 @@
|
|||
Swift
|
||||
=====
|
||||
|
||||
This package provides an easy to use library for interfacing with
|
||||
Swift / Openstack Object Storage / Rackspace cloud files from the Go
|
||||
Language
|
||||
|
||||
See here for package docs
|
||||
|
||||
http://godoc.org/github.com/ncw/swift
|
||||
|
||||
[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift)
|
||||
|
||||
Install
|
||||
-------
|
||||
|
||||
Use go to install the library
|
||||
|
||||
go get github.com/ncw/swift
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
See here for full package docs
|
||||
|
||||
- http://godoc.org/github.com/ncw/swift
|
||||
|
||||
Here is a short example from the docs
|
||||
|
||||
import "github.com/ncw/swift"
|
||||
|
||||
// Create a connection
|
||||
c := swift.Connection{
|
||||
UserName: "user",
|
||||
ApiKey: "key",
|
||||
AuthUrl: "auth_url",
|
||||
Domain: "domain", // Name of the domain (v3 auth only)
|
||||
Tenant: "tenant", // Name of the tenant (v2 auth only)
|
||||
}
|
||||
// Authenticate
|
||||
err := c.Authenticate()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// List all the containers
|
||||
containers, err := c.ContainerNames(nil)
|
||||
fmt.Println(containers)
|
||||
// etc...
|
||||
|
||||
Additions
|
||||
---------
|
||||
|
||||
The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface.
|
||||
|
||||
Testing
|
||||
-------
|
||||
|
||||
To run the tests you can either use an embedded fake Swift server,
a real Openstack Swift server, or a Rackspace Cloud files account.
|
||||
|
||||
When using a real Swift server, you need to set these environment variables
|
||||
before running the tests
|
||||
|
||||
export SWIFT_API_USER='user'
|
||||
export SWIFT_API_KEY='key'
|
||||
export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0'
|
||||
|
||||
And optionally these if using v2 authentication
|
||||
|
||||
export SWIFT_TENANT='TenantName'
|
||||
export SWIFT_TENANT_ID='TenantId'
|
||||
|
||||
And optionally these if using v3 authentication
|
||||
|
||||
export SWIFT_TENANT='TenantName'
|
||||
export SWIFT_TENANT_ID='TenantId'
|
||||
export SWIFT_API_DOMAIN_ID='domain id'
|
||||
export SWIFT_API_DOMAIN='domain name'
|
||||
|
||||
And optionally these if using v3 trust
|
||||
|
||||
export SWIFT_TRUST_ID='TrustId'
|
||||
|
||||
And optionally this if you want to skip server certificate validation
|
||||
|
||||
export SWIFT_AUTH_INSECURE=1
|
||||
|
||||
And optionally this to configure the connect channel timeout, in seconds
|
||||
|
||||
export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60
|
||||
|
||||
And optionally this to configure the data channel timeout, in seconds
|
||||
|
||||
export SWIFT_DATA_CHANNEL_TIMEOUT=60
|
||||
|
||||
Then run the tests with `go test`
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
This is free software under the terms of MIT license (check COPYING file
|
||||
included in this package).
|
||||
|
||||
Contact and support
|
||||
-------------------
|
||||
|
||||
The project website is at:
|
||||
|
||||
- https://github.com/ncw/swift
|
||||
|
||||
There you can file bug reports, ask for help or contribute patches.
|
||||
|
||||
Authors
|
||||
-------
|
||||
|
||||
- Nick Craig-Wood <nick@craig-wood.com>
|
||||
|
||||
Contributors
|
||||
------------
|
||||
|
||||
- Brian "bojo" Jones <mojobojo@gmail.com>
|
||||
- Janika Liiv <janika@toggl.com>
|
||||
- Yamamoto, Hirotaka <ymmt2005@gmail.com>
|
||||
- Stephen <yo@groks.org>
|
||||
- platformpurple <stephen@platformpurple.com>
|
||||
- Paul Querna <pquerna@apache.org>
|
||||
- Livio Soares <liviobs@gmail.com>
|
||||
- thesyncim <thesyncim@gmail.com>
|
||||
- lsowen <lsowen@s1network.com>
|
||||
- Sylvain Baubeau <sbaubeau@redhat.com>
|
||||
- Chris Kastorff <encryptio@gmail.com>
|
||||
- Dai HaoJun <haojun.dai@hp.com>
|
||||
- Hua Wang <wanghua.humble@gmail.com>
|
||||
- Fabian Ruff <fabian@progra.de>
|
||||
- Arturo Reuschenbach Puncernau <reuschenbach@gmail.com>
|
||||
- Petr Kotek <petr.kotek@bigcommerce.com>
|
||||
- Stefan Majewsky <stefan.majewsky@sap.com>
|
||||
- Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
- Sam Gunaratne <samgzeit@gmail.com>
|
||||
- Richard Scothern <richard.scothern@gmail.com>
|
||||
- Michel Couillard <couillard.michel@voxlog.ca>
|
||||
- Christopher Waldon <ckwaldon@us.ibm.com>
|
320
vendor/src/github.com/ncw/swift/auth.go
vendored
Normal file
|
@@ -0,0 +1,320 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Auth defines the operations needed to authenticate with swift
|
||||
//
|
||||
// This encapsulates the different authentication schemes in use
|
||||
type Authenticator interface {
|
||||
// Request creates an http.Request for the auth - return nil if not needed
|
||||
Request(*Connection) (*http.Request, error)
|
||||
// Response parses the http.Response
|
||||
Response(resp *http.Response) error
|
||||
// The public storage URL - set Internal to true to read
|
||||
// internal/service net URL
|
||||
StorageUrl(Internal bool) string
|
||||
// The access token
|
||||
Token() string
|
||||
// The CDN url if available
|
||||
CdnUrl() string
|
||||
}
|
||||
|
||||
type CustomEndpointAuthenticator interface {
|
||||
StorageUrlForEndpoint(endpointType EndpointType) string
|
||||
}
|
||||
|
||||
type EndpointType string
|
||||
|
||||
const (
|
||||
// Use public URL as storage URL
|
||||
EndpointTypePublic = EndpointType("public")
|
||||
|
||||
// Use internal URL as storage URL
|
||||
EndpointTypeInternal = EndpointType("internal")
|
||||
|
||||
// Use admin URL as storage URL
|
||||
EndpointTypeAdmin = EndpointType("admin")
|
||||
)
|
||||
|
||||
// newAuth - create a new Authenticator from the AuthUrl
|
||||
//
|
||||
// A hint for AuthVersion can be provided
|
||||
func newAuth(c *Connection) (Authenticator, error) {
|
||||
AuthVersion := c.AuthVersion
|
||||
if AuthVersion == 0 {
|
||||
if strings.Contains(c.AuthUrl, "v3") {
|
||||
AuthVersion = 3
|
||||
} else if strings.Contains(c.AuthUrl, "v2") {
|
||||
AuthVersion = 2
|
||||
} else if strings.Contains(c.AuthUrl, "v1") {
|
||||
AuthVersion = 1
|
||||
} else {
|
||||
return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly")
|
||||
}
|
||||
}
|
||||
switch AuthVersion {
|
||||
case 1:
|
||||
return &v1Auth{}, nil
|
||||
case 2:
|
||||
return &v2Auth{
|
||||
// Guess as to whether using API key or
|
||||
// password it will try both eventually so
|
||||
// this is just an optimization.
|
||||
useApiKey: len(c.ApiKey) >= 32,
|
||||
}, nil
|
||||
case 3:
|
||||
return &v3Auth{}, nil
|
||||
}
|
||||
return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// v1 auth
|
||||
type v1Auth struct {
|
||||
Headers http.Header // V1 auth: the authentication headers so extensions can access them
|
||||
}
|
||||
|
||||
// v1 Authentication - make request
|
||||
func (auth *v1Auth) Request(c *Connection) (*http.Request, error) {
|
||||
req, err := http.NewRequest("GET", c.AuthUrl, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("User-Agent", c.UserAgent)
|
||||
req.Header.Set("X-Auth-Key", c.ApiKey)
|
||||
req.Header.Set("X-Auth-User", c.UserName)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// v1 Authentication - read response
|
||||
func (auth *v1Auth) Response(resp *http.Response) error {
|
||||
auth.Headers = resp.Header
|
||||
return nil
|
||||
}
|
||||
|
||||
// v1 Authentication - read storage url
|
||||
func (auth *v1Auth) StorageUrl(Internal bool) string {
|
||||
storageUrl := auth.Headers.Get("X-Storage-Url")
|
||||
if Internal {
|
||||
newUrl, err := url.Parse(storageUrl)
|
||||
if err != nil {
|
||||
return storageUrl
|
||||
}
|
||||
newUrl.Host = "snet-" + newUrl.Host
|
||||
storageUrl = newUrl.String()
|
||||
}
|
||||
return storageUrl
|
||||
}
|
||||
|
||||
// v1 Authentication - read auth token
|
||||
func (auth *v1Auth) Token() string {
|
||||
return auth.Headers.Get("X-Auth-Token")
|
||||
}
|
||||
|
||||
// v1 Authentication - read cdn url
|
||||
func (auth *v1Auth) CdnUrl() string {
|
||||
return auth.Headers.Get("X-CDN-Management-Url")
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// v2 Authentication
|
||||
type v2Auth struct {
|
||||
Auth *v2AuthResponse
|
||||
Region string
|
||||
useApiKey bool // if set will use API key not Password
|
||||
useApiKeyOk bool // if set won't change useApiKey any more
|
||||
notFirst bool // set after first run
|
||||
}
|
||||
|
||||
// v2 Authentication - make request
|
||||
func (auth *v2Auth) Request(c *Connection) (*http.Request, error) {
|
||||
auth.Region = c.Region
|
||||
// Toggle useApiKey if not first run and not OK yet
|
||||
if auth.notFirst && !auth.useApiKeyOk {
|
||||
auth.useApiKey = !auth.useApiKey
|
||||
}
|
||||
auth.notFirst = true
|
||||
// Create a V2 auth request for the body of the connection
|
||||
var v2i interface{}
|
||||
if !auth.useApiKey {
|
||||
// Normal swift authentication
|
||||
v2 := v2AuthRequest{}
|
||||
v2.Auth.PasswordCredentials.UserName = c.UserName
|
||||
v2.Auth.PasswordCredentials.Password = c.ApiKey
|
||||
v2.Auth.Tenant = c.Tenant
|
||||
v2.Auth.TenantId = c.TenantId
|
||||
v2i = v2
|
||||
} else {
|
||||
// Rackspace special with API Key
|
||||
v2 := v2AuthRequestRackspace{}
|
||||
v2.Auth.ApiKeyCredentials.UserName = c.UserName
|
||||
v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey
|
||||
v2.Auth.Tenant = c.Tenant
|
||||
v2.Auth.TenantId = c.TenantId
|
||||
v2i = v2
|
||||
}
|
||||
body, err := json.Marshal(v2i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
url := c.AuthUrl
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
url += "/"
|
||||
}
|
||||
url += "tokens"
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", c.UserAgent)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// v2 Authentication - read response
|
||||
func (auth *v2Auth) Response(resp *http.Response) error {
|
||||
auth.Auth = new(v2AuthResponse)
|
||||
err := readJson(resp, auth.Auth)
|
||||
// If successfully read Auth then no need to toggle useApiKey any more
|
||||
if err == nil {
|
||||
auth.useApiKeyOk = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Finds the Endpoint Url of "type" from the v2AuthResponse using the
|
||||
// Region if set or defaulting to the first one if not
|
||||
//
|
||||
// Returns "" if not found
|
||||
func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string {
|
||||
for _, catalog := range auth.Auth.Access.ServiceCatalog {
|
||||
if catalog.Type == Type {
|
||||
for _, endpoint := range catalog.Endpoints {
|
||||
if auth.Region == "" || (auth.Region == endpoint.Region) {
|
||||
switch endpointType {
|
||||
case EndpointTypeInternal:
|
||||
return endpoint.InternalUrl
|
||||
case EndpointTypePublic:
|
||||
return endpoint.PublicUrl
|
||||
case EndpointTypeAdmin:
|
||||
return endpoint.AdminUrl
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// v2 Authentication - read storage url
|
||||
//
|
||||
// If Internal is true then it reads the private (internal / service
|
||||
// net) URL.
|
||||
func (auth *v2Auth) StorageUrl(Internal bool) string {
|
||||
endpointType := EndpointTypePublic
|
||||
if Internal {
|
||||
endpointType = EndpointTypeInternal
|
||||
}
|
||||
return auth.StorageUrlForEndpoint(endpointType)
|
||||
}
|
||||
|
||||
// v2 Authentication - read storage url
|
||||
//
|
||||
// Use the indicated endpointType to choose a URL.
|
||||
func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
|
||||
return auth.endpointUrl("object-store", endpointType)
|
||||
}
|
||||
|
||||
// v2 Authentication - read auth token
|
||||
func (auth *v2Auth) Token() string {
|
||||
return auth.Auth.Access.Token.Id
|
||||
}
|
||||
|
||||
// v2 Authentication - read cdn url
|
||||
func (auth *v2Auth) CdnUrl() string {
|
||||
return auth.endpointUrl("rax:object-cdn", EndpointTypePublic)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// V2 Authentication request
|
||||
//
|
||||
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
|
||||
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
|
||||
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
|
||||
type v2AuthRequest struct {
|
||||
Auth struct {
|
||||
PasswordCredentials struct {
|
||||
UserName string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
} `json:"passwordCredentials"`
|
||||
Tenant string `json:"tenantName,omitempty"`
|
||||
TenantId string `json:"tenantId,omitempty"`
|
||||
} `json:"auth"`
|
||||
}
|
||||
|
||||
// V2 Authentication request - Rackspace variant
|
||||
//
|
||||
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
|
||||
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
|
||||
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
|
||||
type v2AuthRequestRackspace struct {
|
||||
Auth struct {
|
||||
ApiKeyCredentials struct {
|
||||
UserName string `json:"username"`
|
||||
ApiKey string `json:"apiKey"`
|
||||
} `json:"RAX-KSKEY:apiKeyCredentials"`
|
||||
Tenant string `json:"tenantName,omitempty"`
|
||||
TenantId string `json:"tenantId,omitempty"`
|
||||
} `json:"auth"`
|
||||
}
|
||||
|
||||
// V2 Authentication reply
|
||||
//
|
||||
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
|
||||
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
|
||||
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
|
||||
type v2AuthResponse struct {
|
||||
Access struct {
|
||||
ServiceCatalog []struct {
|
||||
Endpoints []struct {
|
||||
InternalUrl string
|
||||
PublicUrl string
|
||||
AdminUrl string
|
||||
Region string
|
||||
TenantId string
|
||||
}
|
||||
Name string
|
||||
Type string
|
||||
}
|
||||
Token struct {
|
||||
Expires string
|
||||
Id string
|
||||
Tenant struct {
|
||||
Id string
|
||||
Name string
|
||||
}
|
||||
}
|
||||
User struct {
|
||||
DefaultRegion string `json:"RAX-AUTH:defaultRegion"`
|
||||
Id string
|
||||
Name string
|
||||
Roles []struct {
|
||||
Description string
|
||||
Id string
|
||||
Name string
|
||||
TenantId string
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
227
vendor/src/github.com/ncw/swift/auth_v3.go
vendored
Normal file
|
@@ -0,0 +1,227 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
v3AuthMethodToken = "token"
|
||||
v3AuthMethodPassword = "password"
|
||||
v3CatalogTypeObjectStore = "object-store"
|
||||
)
|
||||
|
||||
// V3 Authentication request
|
||||
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
|
||||
// http://developer.openstack.org/api-ref-identity-v3.html
|
||||
type v3AuthRequest struct {
|
||||
Auth struct {
|
||||
Identity struct {
|
||||
Methods []string `json:"methods"`
|
||||
Password *v3AuthPassword `json:"password,omitempty"`
|
||||
Token *v3AuthToken `json:"token,omitempty"`
|
||||
} `json:"identity"`
|
||||
Scope *v3Scope `json:"scope,omitempty"`
|
||||
} `json:"auth"`
|
||||
}
|
||||
|
||||
type v3Scope struct {
|
||||
Project *v3Project `json:"project,omitempty"`
|
||||
Domain *v3Domain `json:"domain,omitempty"`
|
||||
Trust *v3Trust `json:"OS-TRUST:trust,omitempty"`
|
||||
}
|
||||
|
||||
type v3Domain struct {
|
||||
Id string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
type v3Project struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Domain *v3Domain `json:"domain,omitempty"`
|
||||
}
|
||||
|
||||
type v3Trust struct {
|
||||
Id string `json:"id"`
|
||||
}
|
||||
|
||||
type v3User struct {
|
||||
Domain *v3Domain `json:"domain,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
}
|
||||
|
||||
type v3AuthToken struct {
|
||||
Id string `json:"id"`
|
||||
}
|
||||
|
||||
type v3AuthPassword struct {
|
||||
User v3User `json:"user"`
|
||||
}
|
||||
|
||||
// V3 Authentication response
|
||||
type v3AuthResponse struct {
|
||||
Token struct {
|
||||
Expires_At, Issued_At string
|
||||
Methods []string
|
||||
Roles []struct {
|
||||
Id, Name string
|
||||
Links struct {
|
||||
Self string
|
||||
}
|
||||
}
|
||||
|
||||
Project struct {
|
||||
Domain struct {
|
||||
Id, Name string
|
||||
}
|
||||
Id, Name string
|
||||
}
|
||||
|
||||
Catalog []struct {
|
||||
Id, Namem, Type string
|
||||
Endpoints []struct {
|
||||
Id, Region_Id, Url, Region string
|
||||
Interface EndpointType
|
||||
}
|
||||
}
|
||||
|
||||
User struct {
|
||||
Id, Name string
|
||||
Domain struct {
|
||||
Id, Name string
|
||||
Links struct {
|
||||
Self string
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Audit_Ids []string
|
||||
}
|
||||
}
|
||||
|
||||
type v3Auth struct {
|
||||
Region string
|
||||
Auth *v3AuthResponse
|
||||
Headers http.Header
|
||||
}
|
||||
|
||||
func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
|
||||
auth.Region = c.Region
|
||||
|
||||
var v3i interface{}
|
||||
|
||||
v3 := v3AuthRequest{}
|
||||
|
||||
if c.UserName == "" {
|
||||
v3.Auth.Identity.Methods = []string{v3AuthMethodToken}
|
||||
v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey}
|
||||
} else {
|
||||
v3.Auth.Identity.Methods = []string{v3AuthMethodPassword}
|
||||
v3.Auth.Identity.Password = &v3AuthPassword{
|
||||
User: v3User{
|
||||
Name: c.UserName,
|
||||
Password: c.ApiKey,
|
||||
},
|
||||
}
|
||||
|
||||
var domain *v3Domain
|
||||
|
||||
if c.Domain != "" {
|
||||
domain = &v3Domain{Name: c.Domain}
|
||||
} else if c.DomainId != "" {
|
||||
domain = &v3Domain{Id: c.DomainId}
|
||||
}
|
||||
v3.Auth.Identity.Password.User.Domain = domain
|
||||
}
|
||||
|
||||
if c.TrustId != "" {
|
||||
v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
|
||||
} else if c.TenantId != "" || c.Tenant != "" {
|
||||
|
||||
v3.Auth.Scope = &v3Scope{Project: &v3Project{}}
|
||||
|
||||
if c.TenantId != "" {
|
||||
v3.Auth.Scope.Project.Id = c.TenantId
|
||||
} else if c.Tenant != "" {
|
||||
v3.Auth.Scope.Project.Name = c.Tenant
|
||||
switch {
|
||||
case c.TenantDomain != "":
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain}
|
||||
case c.TenantDomainId != "":
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId}
|
||||
case c.Domain != "":
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain}
|
||||
case c.DomainId != "":
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId}
|
||||
default:
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
v3i = v3
|
||||
|
||||
body, err := json.Marshal(v3i)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := c.AuthUrl
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
url += "/"
|
||||
}
|
||||
url += "auth/tokens"
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", c.UserAgent)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (auth *v3Auth) Response(resp *http.Response) error {
|
||||
auth.Auth = &v3AuthResponse{}
|
||||
auth.Headers = resp.Header
|
||||
err := readJson(resp, auth.Auth)
|
||||
return err
|
||||
}
|
||||
|
||||
func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string {
|
||||
for _, catalog := range auth.Auth.Token.Catalog {
|
||||
if catalog.Type == Type {
|
||||
for _, endpoint := range catalog.Endpoints {
|
||||
if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) {
|
||||
return endpoint.Url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (auth *v3Auth) StorageUrl(Internal bool) string {
|
||||
endpointType := EndpointTypePublic
|
||||
if Internal {
|
||||
endpointType = EndpointTypeInternal
|
||||
}
|
||||
return auth.StorageUrlForEndpoint(endpointType)
|
||||
}
|
||||
|
||||
func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
|
||||
return auth.endpointUrl("object-store", endpointType)
|
||||
}
|
||||
|
||||
func (auth *v3Auth) Token() string {
|
||||
return auth.Headers.Get("X-Subject-Token")
|
||||
}
|
||||
|
||||
func (auth *v3Auth) CdnUrl() string {
|
||||
return ""
|
||||
}
|
28
vendor/src/github.com/ncw/swift/compatibility_1_0.go
vendored
Normal file
|
@@ -0,0 +1,28 @@
|
|||
// Go 1.0 compatibility functions
|
||||
|
||||
// +build !go1.1
|
||||
|
||||
package swift
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cancel the request - doesn't work under < go 1.1
|
||||
func cancelRequest(transport http.RoundTripper, req *http.Request) {
|
||||
log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1")
|
||||
}
|
||||
|
||||
// Reset a timer - Doesn't work properly < go 1.1
|
||||
//
|
||||
// This is quite hard to do properly under go < 1.1 so we do a crude
|
||||
// approximation and hope that everyone upgrades to go 1.1 quickly
|
||||
func resetTimer(t *time.Timer, d time.Duration) {
|
||||
t.Stop()
|
||||
// Very likely this doesn't actually work if we are already
|
||||
// selecting on t.C. However we've stopped the original timer
|
||||
// so won't break transfers but may not time them out :-(
|
||||
*t = *time.NewTimer(d)
|
||||
}
|
24
vendor/src/github.com/ncw/swift/compatibility_1_1.go
vendored
Normal file
|
@@ -0,0 +1,24 @@
|
|||
// Go 1.1 and later compatibility functions
|
||||
//
|
||||
// +build go1.1
|
||||
|
||||
package swift
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cancel the request
|
||||
func cancelRequest(transport http.RoundTripper, req *http.Request) {
|
||||
if tr, ok := transport.(interface {
|
||||
CancelRequest(*http.Request)
|
||||
}); ok {
|
||||
tr.CancelRequest(req)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset a timer
|
||||
func resetTimer(t *time.Timer, d time.Duration) {
|
||||
t.Reset(d)
|
||||
}
|
136
vendor/src/github.com/ncw/swift/dlo.go
vendored
Normal file
|
@@ -0,0 +1,136 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// DynamicLargeObjectCreateFile represents an open dynamic large object
|
||||
type DynamicLargeObjectCreateFile struct {
|
||||
largeObjectCreateFile
|
||||
}
|
||||
|
||||
// DynamicLargeObjectCreateFile creates a dynamic large object
|
||||
// returning an object which satisfies io.Writer, io.Seeker, io.Closer
|
||||
// and io.ReaderFrom. The flags are as passed to the
|
||||
// largeObjectCreate method.
|
||||
func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
|
||||
lo, err := c.largeObjectCreate(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return withBuffer(opts, &DynamicLargeObjectCreateFile{
|
||||
largeObjectCreateFile: *lo,
|
||||
}), nil
|
||||
}
|
||||
|
||||
// DynamicLargeObjectCreate creates or truncates an existing dynamic
|
||||
// large object returning a writeable object. This sets opts.Flags to
|
||||
// an appropriate value before calling DynamicLargeObjectCreateFile
|
||||
func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
|
||||
opts.Flags = os.O_TRUNC | os.O_CREATE
|
||||
return c.DynamicLargeObjectCreateFile(opts)
|
||||
}
|
||||
|
||||
// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments.
|
||||
func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
|
||||
return c.LargeObjectDelete(container, path)
|
||||
}
|
||||
|
||||
// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName
|
||||
func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
|
||||
info, headers, err := c.Object(dstContainer, srcObjectName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
|
||||
if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createDLOManifest creates a dynamic large object manifest
|
||||
func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string) error {
|
||||
headers := make(Headers)
|
||||
headers["X-Object-Manifest"] = prefix
|
||||
manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := manifest.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close satisfies the io.Closer interface
|
||||
func (file *DynamicLargeObjectCreateFile) Close() error {
|
||||
return file.Flush()
|
||||
}
|
||||
|
||||
func (file *DynamicLargeObjectCreateFile) Flush() error {
|
||||
err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
|
||||
}
|
||||
|
||||
func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) {
|
||||
//a simple container listing works 99.9% of the time
|
||||
segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hasObjectName := make(map[string]struct{})
|
||||
for _, segment := range segments {
|
||||
hasObjectName[segment.Name] = struct{}{}
|
||||
}
|
||||
|
||||
//The container listing might be outdated (i.e. not contain all existing
|
||||
//segment objects yet) because of temporary inconsistency (Swift is only
|
||||
//eventually consistent!). Check its completeness.
|
||||
segmentNumber := 0
|
||||
for {
|
||||
segmentNumber++
|
||||
segmentName := getSegment(segmentPath, segmentNumber)
|
||||
if _, seen := hasObjectName[segmentName]; seen {
|
||||
continue
|
||||
}
|
||||
|
||||
//This segment is missing in the container listing. Use a more reliable
|
||||
//request to check its existence. (HEAD requests on segments are
|
||||
//guaranteed to return the correct metadata, except for the pathological
|
||||
//case of an outage of large parts of the Swift cluster or its network,
|
||||
//since every segment is only written once.)
|
||||
segment, _, err := c.Object(segmentContainer, segmentName)
|
||||
switch err {
|
||||
case nil:
|
||||
//found new segment -> add it in the correct position and keep
|
||||
//going, more might be missing
|
||||
if segmentNumber <= len(segments) {
|
||||
segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...)
|
||||
segments[segmentNumber-1] = segment
|
||||
} else {
|
||||
segments = append(segments, segment)
|
||||
}
|
||||
continue
|
||||
case ObjectNotFound:
|
||||
//This segment is missing. Since we upload segments sequentially,
|
||||
//there won't be any more segments after it.
|
||||
return segments, nil
|
||||
default:
|
||||
return nil, err //unexpected error
|
||||
}
|
||||
}
|
||||
}
|
19
vendor/src/github.com/ncw/swift/doc.go
vendored
Normal file
|
@@ -0,0 +1,19 @@
|
|||
/*
|
||||
Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files
|
||||
|
||||
Standard Usage
|
||||
|
||||
Most of the work is done through the Container*() and Object*() methods.
|
||||
|
||||
All methods are safe to use concurrently in multiple go routines.
|
||||
|
||||
Object Versioning
|
||||
|
||||
As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in its place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system.
|
||||
|
||||
Rackspace Sub Module
|
||||
|
||||
This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack; it has therefore been placed in a submodule. One can easily create an RsConnection and use it like the standard Connection to access and manipulate containers and objects.
|
||||
|
||||
*/
|
||||
package swift
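
A minimal sketch of the versioning behaviour described in the package comment above (illustrative only, not part of the vendored file; assumes an authenticated *swift.Connection and placeholder container names):

// demoVersioning shows that an object overwritten N times must be deleted N
// times before it is gone, because each delete restores the previous copy
// from the versions container. Error handling is abbreviated.
func demoVersioning(c *swift.Connection) error {
	if err := c.VersionContainerCreate("current", "versions"); err != nil {
		return err
	}
	// Each put of an existing object moves the old copy into "versions".
	for _, body := range []string{"v1", "v2", "v3"} {
		if err := c.ObjectPutString("current", "report.txt", body, "text/plain"); err != nil {
			return err
		}
	}
	// The first delete restores "v2", the second "v1", the third removes it.
	for i := 0; i < 3; i++ {
		if err := c.ObjectDelete("current", "report.txt"); err != nil {
			return err
		}
	}
	return nil
}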
|
109 vendor/src/github.com/ncw/swift/example_test.go (vendored, new file)
@@ -0,0 +1,109 @@
|
|||
// Copyright...
|
||||
|
||||
// This example demonstrates opening a Connection and doing some basic operations.
|
||||
package swift_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
|
||||
func ExampleConnection() {
|
||||
// Create a v1 auth connection
|
||||
c := &swift.Connection{
|
||||
// This should be your username
|
||||
UserName: "user",
|
||||
// This should be your api key
|
||||
ApiKey: "key",
|
||||
// This should be a v1 auth url, eg
|
||||
// Rackspace US https://auth.api.rackspacecloud.com/v1.0
|
||||
// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0
|
||||
// Memset Memstore UK https://auth.storage.memset.com/v1.0
|
||||
AuthUrl: "auth_url",
|
||||
}
|
||||
|
||||
// Authenticate
|
||||
err := c.Authenticate()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// List all the containers
|
||||
containers, err := c.ContainerNames(nil)
|
||||
fmt.Println(containers)
|
||||
// etc...
|
||||
|
||||
// ------ or alternatively create a v2 connection ------
|
||||
|
||||
// Create a v2 auth connection
|
||||
c = &swift.Connection{
|
||||
// This is the sub user for the storage - eg "admin"
|
||||
UserName: "user",
|
||||
// This should be your api key
|
||||
ApiKey: "key",
|
||||
// This should be a version2 auth url, eg
|
||||
// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0
|
||||
// Memset Memstore v2 https://auth.storage.memset.com/v2.0
|
||||
AuthUrl: "v2_auth_url",
|
||||
// Region to use - default is to use the first region if unset
|
||||
Region: "LON",
|
||||
// Name of the tenant - this is likely your username
|
||||
Tenant: "jim",
|
||||
}
|
||||
|
||||
// as above...
|
||||
}
|
||||
|
||||
var container string
|
||||
|
||||
func ExampleConnection_ObjectsWalk() {
|
||||
c, rollback := makeConnection(nil)
|
||||
defer rollback()
|
||||
|
||||
objects := make([]string, 0)
|
||||
err := c.ObjectsWalk(container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) {
|
||||
newObjects, err := c.ObjectNames(container, opts)
|
||||
if err == nil {
|
||||
objects = append(objects, newObjects...)
|
||||
}
|
||||
return newObjects, err
|
||||
})
|
||||
fmt.Println("Found all the objects", objects, err)
|
||||
}
|
||||
|
||||
func ExampleConnection_VersionContainerCreate() {
|
||||
c, rollback := makeConnection(nil)
|
||||
defer rollback()
|
||||
|
||||
// Use the helper method to create the current and versions container.
|
||||
if err := c.VersionContainerCreate("cds", "cd-versions"); err != nil {
|
||||
fmt.Print(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleConnection_VersionEnable() {
|
||||
c, rollback := makeConnection(nil)
|
||||
defer rollback()
|
||||
|
||||
// Build the containers manually and enable them.
|
||||
if err := c.ContainerCreate("movie-versions", nil); err != nil {
|
||||
fmt.Print(err.Error())
|
||||
}
|
||||
if err := c.ContainerCreate("movies", nil); err != nil {
|
||||
fmt.Print(err.Error())
|
||||
}
|
||||
if err := c.VersionEnable("movies", "movie-versions"); err != nil {
|
||||
fmt.Print(err.Error())
|
||||
}
|
||||
|
||||
// Access the primary container as usual with ObjectCreate(), ObjectPut(), etc.
|
||||
// etc...
|
||||
}
|
||||
|
||||
func ExampleConnection_VersionDisable() {
|
||||
c, rollback := makeConnection(nil)
|
||||
defer rollback()
|
||||
|
||||
// Disable versioning on a container. Note that this does not delete the versioning container.
|
||||
c.VersionDisable("movies")
|
||||
}
|
445 vendor/src/github.com/ncw/swift/largeobjects.go (vendored, new file)
@@ -0,0 +1,445 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
gopath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NotLargeObject is returned if an operation is performed on an object which isn't large.
|
||||
var NotLargeObject = errors.New("Not a large object")
|
||||
|
||||
// readAfterWriteTimeout defines how long we wait for an object to appear after it has been uploaded
|
||||
var readAfterWriteTimeout = 15 * time.Second
|
||||
|
||||
// readAfterWriteWait defines the time to sleep between two retries
|
||||
var readAfterWriteWait = 200 * time.Millisecond
|
||||
|
||||
// largeObjectCreateFile represents an open static or dynamic large object
|
||||
type largeObjectCreateFile struct {
|
||||
conn *Connection
|
||||
container string
|
||||
objectName string
|
||||
currentLength int64
|
||||
filePos int64
|
||||
chunkSize int64
|
||||
segmentContainer string
|
||||
prefix string
|
||||
contentType string
|
||||
checkHash bool
|
||||
segments []Object
|
||||
headers Headers
|
||||
minChunkSize int64
|
||||
}
|
||||
|
||||
func swiftSegmentPath(path string) (string, error) {
|
||||
checksum := sha1.New()
|
||||
random := make([]byte, 32)
|
||||
if _, err := rand.Read(random); err != nil {
|
||||
return "", err
|
||||
}
|
||||
path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...)))
|
||||
return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil
|
||||
}
|
||||
|
||||
func getSegment(segmentPath string, partNumber int) string {
|
||||
return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
|
||||
}
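// For example (illustrative, not part of the upstream file):
//   getSegment("segments/3ab/cdef", 3) == "segments/3ab/cdef/0000000000000003"
// Part numbers are zero-padded to 16 digits so segment names sort
// lexicographically in upload order.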
|
||||
|
||||
func parseFullPath(manifest string) (container string, prefix string) {
|
||||
components := strings.SplitN(manifest, "/", 2)
|
||||
container = components[0]
|
||||
if len(components) > 1 {
|
||||
prefix = components[1]
|
||||
}
|
||||
return container, prefix
|
||||
}
|
||||
|
||||
func (headers Headers) IsLargeObjectDLO() bool {
|
||||
_, isDLO := headers["X-Object-Manifest"]
|
||||
return isDLO
|
||||
}
|
||||
|
||||
func (headers Headers) IsLargeObjectSLO() bool {
|
||||
_, isSLO := headers["X-Static-Large-Object"]
|
||||
return isSLO
|
||||
}
|
||||
|
||||
func (headers Headers) IsLargeObject() bool {
|
||||
return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO()
|
||||
}
|
||||
|
||||
func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) {
|
||||
if manifest, isDLO := headers["X-Object-Manifest"]; isDLO {
|
||||
segmentContainer, segmentPath := parseFullPath(manifest)
|
||||
segments, err := c.getAllDLOSegments(segmentContainer, segmentPath)
|
||||
return segmentContainer, segments, err
|
||||
}
|
||||
if headers.IsLargeObjectSLO() {
|
||||
return c.getAllSLOSegments(container, path)
|
||||
}
|
||||
return "", nil, NotLargeObject
|
||||
}
|
||||
|
||||
// LargeObjectOpts describes how a large object should be created
|
||||
type LargeObjectOpts struct {
|
||||
Container string // Name of container to place object
|
||||
ObjectName string // Name of object
|
||||
Flags int // Creation flags
|
||||
CheckHash bool // If set Check the hash
|
||||
Hash string // If set use this hash to check
|
||||
ContentType string // Content-Type of the object
|
||||
Headers Headers // Additional headers to upload the object with
|
||||
ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set
|
||||
MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info
|
||||
SegmentContainer string // Name of the container to place segments
|
||||
SegmentPrefix string // Prefix to use for the segments
|
||||
NoBuffer bool // Prevents using a bufio.Writer to write segments
|
||||
}
|
||||
|
||||
type LargeObjectFile interface {
|
||||
io.Writer
|
||||
io.Seeker
|
||||
io.Closer
|
||||
Size() int64
|
||||
Flush() error
|
||||
}
|
||||
|
||||
// largeObjectCreate creates a large object at opts.Container, opts.ObjectName.
|
||||
//
|
||||
// opts.Flags can have the following bits set
|
||||
// os.TRUNC - remove the contents of the large object if it exists
|
||||
// os.APPEND - write at the end of the large object
|
||||
func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
|
||||
var (
|
||||
segmentPath string
|
||||
segmentContainer string
|
||||
segments []Object
|
||||
currentLength int64
|
||||
err error
|
||||
)
|
||||
|
||||
if opts.SegmentPrefix != "" {
|
||||
segmentPath = opts.SegmentPrefix
|
||||
} else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil {
|
||||
if opts.Flags&os.O_TRUNC != 0 {
|
||||
c.LargeObjectDelete(opts.Container, opts.ObjectName)
|
||||
} else {
|
||||
currentLength = info.Bytes
|
||||
if headers.IsLargeObject() {
|
||||
segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(segments) > 0 {
|
||||
segmentPath = gopath.Dir(segments[0].Name)
|
||||
}
|
||||
} else {
|
||||
if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
segments = append(segments, info)
|
||||
}
|
||||
}
|
||||
} else if err != ObjectNotFound {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// segmentContainer is not empty when the manifest already existed
|
||||
if segmentContainer == "" {
|
||||
if opts.SegmentContainer != "" {
|
||||
segmentContainer = opts.SegmentContainer
|
||||
} else {
|
||||
segmentContainer = opts.Container + "_segments"
|
||||
}
|
||||
}
|
||||
|
||||
file := &largeObjectCreateFile{
|
||||
conn: c,
|
||||
checkHash: opts.CheckHash,
|
||||
container: opts.Container,
|
||||
objectName: opts.ObjectName,
|
||||
chunkSize: opts.ChunkSize,
|
||||
minChunkSize: opts.MinChunkSize,
|
||||
headers: opts.Headers,
|
||||
segmentContainer: segmentContainer,
|
||||
prefix: segmentPath,
|
||||
segments: segments,
|
||||
currentLength: currentLength,
|
||||
}
|
||||
|
||||
if file.chunkSize == 0 {
|
||||
file.chunkSize = 10 * 1024 * 1024
|
||||
}
|
||||
|
||||
if file.minChunkSize > file.chunkSize {
|
||||
file.chunkSize = file.minChunkSize
|
||||
}
|
||||
|
||||
if opts.Flags&os.O_APPEND != 0 {
|
||||
file.filePos = currentLength
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// LargeObjectDelete deletes the large object named by container, path
|
||||
func (c *Connection) LargeObjectDelete(container string, objectName string) error {
|
||||
_, headers, err := c.Object(container, objectName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var objects [][]string
|
||||
if headers.IsLargeObject() {
|
||||
segmentContainer, segments, err := c.getAllSegments(container, objectName, headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, obj := range segments {
|
||||
objects = append(objects, []string{segmentContainer, obj.Name})
|
||||
}
|
||||
}
|
||||
objects = append(objects, []string{container, objectName})
|
||||
|
||||
info, err := c.cachedQueryInfo()
|
||||
if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
|
||||
filenames := make([]string, len(objects))
|
||||
for i, obj := range objects {
|
||||
filenames[i] = obj[0] + "/" + obj[1]
|
||||
}
|
||||
_, err = c.doBulkDelete(filenames)
|
||||
// Don't fail on ObjectNotFound because eventual consistency
|
||||
// makes this situation normal.
|
||||
if err != nil && err != Forbidden && err != ObjectNotFound {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
for _, obj := range objects {
|
||||
if err := c.ObjectDelete(obj[0], obj[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LargeObjectGetSegments returns all the segments that compose an object
|
||||
// If the object is a Dynamic Large Object (DLO), it just returns the objects
|
||||
// that have the prefix as indicated by the manifest.
|
||||
// If the object is a Static Large Object (SLO), it retrieves the JSON content
|
||||
// of the manifest and returns all of its segments.
|
||||
func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) {
|
||||
_, headers, err := c.Object(container, path)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
return c.getAllSegments(container, path, headers)
|
||||
}
|
||||
|
||||
// Seek sets the offset for the next write operation
|
||||
func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case 0:
|
||||
file.filePos = offset
|
||||
case 1:
|
||||
file.filePos += offset
|
||||
case 2:
|
||||
file.filePos = file.currentLength + offset
|
||||
default:
|
||||
return -1, fmt.Errorf("invalid value for whence")
|
||||
}
|
||||
if file.filePos < 0 {
|
||||
return -1, fmt.Errorf("negative offset")
|
||||
}
|
||||
return file.filePos, nil
|
||||
}
|
||||
|
||||
func (file *largeObjectCreateFile) Size() int64 {
|
||||
return file.currentLength
|
||||
}
|
||||
|
||||
func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) {
|
||||
waitingTime := readAfterWriteWait
|
||||
endTimer := time.After(readAfterWriteTimeout)
|
||||
for {
|
||||
var headers Headers
|
||||
var sz int64
|
||||
if headers, sz, err = fn(); err == nil {
|
||||
if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-endTimer:
|
||||
err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz)
|
||||
return
|
||||
case <-time.After(waitingTime):
|
||||
waitingTime *= 2
|
||||
}
|
||||
}
|
||||
}
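// Illustrative timing (not part of the upstream file): the check above is
// retried after roughly 200ms, 400ms, 800ms, ... (readAfterWriteWait doubling
// on each attempt) until the object reports the expected size or the 15s
// readAfterWriteTimeout elapses.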
|
||||
|
||||
func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) {
|
||||
err = withLORetry(expectedSize, func() (Headers, int64, error) {
|
||||
var info Object
|
||||
var headers Headers
|
||||
info, headers, err = c.objectBase(container, objectName)
|
||||
if err != nil {
|
||||
return headers, 0, err
|
||||
}
|
||||
return headers, info.Bytes, nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Write satisfies the io.Writer interface
|
||||
func (file *largeObjectCreateFile) Write(buf []byte) (int, error) {
|
||||
var sz int64
|
||||
var relativeFilePos int
|
||||
writeSegmentIdx := 0
|
||||
for i, obj := range file.segments {
|
||||
if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) {
|
||||
relativeFilePos = int(file.filePos - sz)
|
||||
break
|
||||
}
|
||||
writeSegmentIdx++
|
||||
sz += obj.Bytes
|
||||
}
|
||||
sizeToWrite := len(buf)
|
||||
for offset := 0; offset < sizeToWrite; {
|
||||
newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if writeSegmentIdx < len(file.segments) {
|
||||
file.segments[writeSegmentIdx] = *newSegment
|
||||
} else {
|
||||
file.segments = append(file.segments, *newSegment)
|
||||
}
|
||||
offset += n
|
||||
writeSegmentIdx++
|
||||
relativeFilePos = 0
|
||||
}
|
||||
file.filePos += int64(sizeToWrite)
|
||||
file.currentLength = 0
|
||||
for _, obj := range file.segments {
|
||||
file.currentLength += obj.Bytes
|
||||
}
|
||||
return sizeToWrite, nil
|
||||
}
|
||||
|
||||
func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) {
|
||||
var (
|
||||
readers []io.Reader
|
||||
existingSegment *Object
|
||||
segmentSize int
|
||||
)
|
||||
segmentName := getSegment(file.prefix, writeSegmentIdx+1)
|
||||
sizeToRead := int(file.chunkSize)
|
||||
if writeSegmentIdx < len(file.segments) {
|
||||
existingSegment = &file.segments[writeSegmentIdx]
|
||||
if writeSegmentIdx != len(file.segments)-1 {
|
||||
sizeToRead = int(existingSegment.Bytes)
|
||||
}
|
||||
if relativeFilePos > 0 {
|
||||
headers := make(Headers)
|
||||
headers["Range"] = "bytes=0-" + strconv.FormatInt(int64(relativeFilePos-1), 10)
|
||||
existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer existingSegmentReader.Close()
|
||||
sizeToRead -= relativeFilePos
|
||||
segmentSize += relativeFilePos
|
||||
readers = []io.Reader{existingSegmentReader}
|
||||
}
|
||||
}
|
||||
if sizeToRead > len(buf) {
|
||||
sizeToRead = len(buf)
|
||||
}
|
||||
segmentSize += sizeToRead
|
||||
readers = append(readers, bytes.NewReader(buf[:sizeToRead]))
|
||||
if existingSegment != nil && segmentSize < int(existingSegment.Bytes) {
|
||||
headers := make(Headers)
|
||||
headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-"
|
||||
tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer tailSegmentReader.Close()
|
||||
segmentSize = int(existingSegment.Bytes)
|
||||
readers = append(readers, tailSegmentReader)
|
||||
}
|
||||
segmentReader := io.MultiReader(readers...)
|
||||
headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil
|
||||
}
|
||||
|
||||
func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile {
|
||||
if !opts.NoBuffer {
|
||||
return &bufferedLargeObjectFile{
|
||||
LargeObjectFile: lo,
|
||||
bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)),
|
||||
}
|
||||
}
|
||||
return lo
|
||||
}
|
||||
|
||||
type bufferedLargeObjectFile struct {
|
||||
LargeObjectFile
|
||||
bw *bufio.Writer
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Close() error {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return blo.LargeObjectFile.Close()
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) {
|
||||
return blo.bw.Write(p)
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return blo.LargeObjectFile.Seek(offset, whence)
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Size() int64 {
|
||||
return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered())
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Flush() error {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return blo.LargeObjectFile.Flush()
|
||||
}
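
A hedged usage sketch of the large-object machinery above, via the exported StaticLargeObjectCreateFile wrapper from slo.go (container and object names are placeholders; assumes imports of "io", "os" and "github.com/ncw/swift"):

// uploadLargeObject streams r into a static large object in 10 MiB segments.
func uploadLargeObject(c *swift.Connection, r io.Reader) error {
	out, err := c.StaticLargeObjectCreateFile(&swift.LargeObjectOpts{
		Container:  "backups",
		ObjectName: "archive.tar",
		Flags:      os.O_TRUNC,       // discard any existing object; os.O_APPEND would keep its segments
		ChunkSize:  10 * 1024 * 1024, // raised automatically if below the cluster's SLO minimum
	})
	if err != nil {
		return err
	}
	if _, err := io.Copy(out, r); err != nil {
		out.Close()
		return err
	}
	// Close flushes the buffered writer, uploads the SLO manifest and waits
	// for the segments to become visible (waitForSegmentsToShowUp above).
	return out.Close()
}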
|
174 vendor/src/github.com/ncw/swift/meta.go (vendored, new file)
@@ -0,0 +1,174 @@
|
|||
// Metadata manipulation in and out of Headers
|
||||
|
||||
package swift
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Metadata stores account, container or object metadata.
|
||||
type Metadata map[string]string
|
||||
|
||||
// Metadata gets the Metadata starting with the metaPrefix out of the Headers.
|
||||
//
|
||||
// The keys in the Metadata will be converted to lower case
|
||||
func (h Headers) Metadata(metaPrefix string) Metadata {
|
||||
m := Metadata{}
|
||||
metaPrefix = http.CanonicalHeaderKey(metaPrefix)
|
||||
for key, value := range h {
|
||||
if strings.HasPrefix(key, metaPrefix) {
|
||||
metaKey := strings.ToLower(key[len(metaPrefix):])
|
||||
m[metaKey] = value
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// AccountMetadata converts Headers from account to a Metadata.
|
||||
//
|
||||
// The keys in the Metadata will be converted to lower case.
|
||||
func (h Headers) AccountMetadata() Metadata {
|
||||
return h.Metadata("X-Account-Meta-")
|
||||
}
|
||||
|
||||
// ContainerMetadata converts Headers from container to a Metadata.
|
||||
//
|
||||
// The keys in the Metadata will be converted to lower case.
|
||||
func (h Headers) ContainerMetadata() Metadata {
|
||||
return h.Metadata("X-Container-Meta-")
|
||||
}
|
||||
|
||||
// ObjectMetadata converts Headers from object to a Metadata.
|
||||
//
|
||||
// The keys in the Metadata will be converted to lower case.
|
||||
func (h Headers) ObjectMetadata() Metadata {
|
||||
return h.Metadata("X-Object-Meta-")
|
||||
}
|
||||
|
||||
// Headers convert the Metadata starting with the metaPrefix into a
|
||||
// Headers.
|
||||
//
|
||||
// The keys in the Metadata will be converted from lower case to http
|
||||
// Canonical (see http.CanonicalHeaderKey).
|
||||
func (m Metadata) Headers(metaPrefix string) Headers {
|
||||
h := Headers{}
|
||||
for key, value := range m {
|
||||
key = http.CanonicalHeaderKey(metaPrefix + key)
|
||||
h[key] = value
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// AccountHeaders converts the Metadata for the account.
|
||||
func (m Metadata) AccountHeaders() Headers {
|
||||
return m.Headers("X-Account-Meta-")
|
||||
}
|
||||
|
||||
// ContainerHeaders converts the Metadata for the container.
|
||||
func (m Metadata) ContainerHeaders() Headers {
|
||||
return m.Headers("X-Container-Meta-")
|
||||
}
|
||||
|
||||
// ObjectHeaders converts the Metadata for the object.
|
||||
func (m Metadata) ObjectHeaders() Headers {
|
||||
return m.Headers("X-Object-Meta-")
|
||||
}
|
||||
|
||||
// Turns a number of ns into a floating point string in seconds
|
||||
//
|
||||
// Trims trailing zeros and is guaranteed to be perfectly accurate
|
||||
func nsToFloatString(ns int64) string {
|
||||
if ns < 0 {
|
||||
return "-" + nsToFloatString(-ns)
|
||||
}
|
||||
result := fmt.Sprintf("%010d", ns)
|
||||
split := len(result) - 9
|
||||
result, decimals := result[:split], result[split:]
|
||||
decimals = strings.TrimRight(decimals, "0")
|
||||
if decimals != "" {
|
||||
result += "."
|
||||
result += decimals
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Turns a floating point string in seconds into a ns integer
|
||||
//
|
||||
// Guaranteed to be perfectly accurate
|
||||
func floatStringToNs(s string) (int64, error) {
|
||||
const zeros = "000000000"
|
||||
if point := strings.IndexRune(s, '.'); point >= 0 {
|
||||
tail := s[point+1:]
|
||||
if fill := 9 - len(tail); fill < 0 {
|
||||
tail = tail[:9]
|
||||
} else {
|
||||
tail += zeros[:fill]
|
||||
}
|
||||
s = s[:point] + tail
|
||||
} else if len(s) > 0 { // Make sure empty string produces an error
|
||||
s += zeros
|
||||
}
|
||||
return strconv.ParseInt(s, 10, 64)
|
||||
}
|
||||
|
||||
// FloatStringToTime converts a floating point number string to a time.Time
|
||||
//
|
||||
// The string is floating point number of seconds since the epoch
|
||||
// (Unix time). The number should be in fixed point format (not
|
||||
// exponential), eg "1354040105.123456789" which represents the time
|
||||
// "2012-11-27T18:15:05.123456789Z"
|
||||
//
|
||||
// Some care is taken to preserve all the accuracy in the time.Time
|
||||
// (which wouldn't happen with a naive conversion through float64) so
|
||||
// a round trip conversion won't change the data.
|
||||
//
|
||||
// If an error is returned then time will be returned as the zero time.
|
||||
func FloatStringToTime(s string) (t time.Time, err error) {
|
||||
ns, err := floatStringToNs(s)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
t = time.Unix(0, ns)
|
||||
return
|
||||
}
|
||||
|
||||
// TimeToFloatString converts a time.Time object to a floating point string
|
||||
//
|
||||
// The string is floating point number of seconds since the epoch
|
||||
// (Unix time). The number is in fixed point format (not
|
||||
// exponential), eg "1354040105.123456789" which represents the time
|
||||
// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped
|
||||
// from the output.
|
||||
//
|
||||
// Some care is taken to preserve all the accuracy in the time.Time
|
||||
// (which wouldn't happen with a naive conversion through float64) so
|
||||
// a round trip conversion won't change the data.
|
||||
func TimeToFloatString(t time.Time) string {
|
||||
return nsToFloatString(t.UnixNano())
|
||||
}
|
||||
|
||||
// Read a modification time (mtime) from a Metadata object
|
||||
//
|
||||
// This is a de facto standard (used in the official python-swiftclient
|
||||
// amongst others) for storing the modification time (as read using
|
||||
// os.Stat) for an object. It is stored using the key 'mtime', which
|
||||
// for example when written to an object will be 'X-Object-Meta-Mtime'.
|
||||
//
|
||||
// If an error is returned then time will be returned as the zero time.
|
||||
func (m Metadata) GetModTime() (t time.Time, err error) {
|
||||
return FloatStringToTime(m["mtime"])
|
||||
}
|
||||
|
||||
// Write a modification time (mtime) to a Metadata object
|
||||
//
|
||||
// This is a de facto standard (used in the official python-swiftclient
|
||||
// amongst others) for storing the modification time (as read using
|
||||
// os.Stat) for an object. It is stored using the key 'mtime', which
|
||||
// for example when written to an object will be 'X-Object-Meta-Mtime'.
|
||||
func (m Metadata) SetModTime(t time.Time) {
|
||||
m["mtime"] = TimeToFloatString(t)
|
||||
}
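
A small sketch of the mtime round trip defined above (illustrative; assumes imports of "os", "time" and "github.com/ncw/swift"):

// mtimeRoundTrip stores a file's modification time the way python-swiftclient
// does and reads it back from the resulting headers.
func mtimeRoundTrip(fi os.FileInfo) (time.Time, error) {
	m := swift.Metadata{}
	m.SetModTime(fi.ModTime())
	h := m.ObjectHeaders() // e.g. {"X-Object-Meta-Mtime": "1354040105.123456789"}
	// In real use h would be sent with an object PUT and read back via a HEAD;
	// here we simply convert straight back.
	return h.ObjectMetadata().GetModTime()
}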
|
213 vendor/src/github.com/ncw/swift/meta_test.go (vendored, new file)
@@ -0,0 +1,213 @@
|
|||
// Tests for swift metadata
|
||||
package swift
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestHeadersToMetadata(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadersToAccountMetadata(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadersToContainerMetadata(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadersToObjectMetadata(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMetadataToHeaders(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMetadataToAccountHeaders(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMetadataToContainerHeaders(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMetadataToObjectHeaders(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNsToFloatString(t *testing.T) {
|
||||
for _, d := range []struct {
|
||||
ns int64
|
||||
fs string
|
||||
}{
|
||||
{0, "0"},
|
||||
{1, "0.000000001"},
|
||||
{1000, "0.000001"},
|
||||
{1000000, "0.001"},
|
||||
{100000000, "0.1"},
|
||||
{1000000000, "1"},
|
||||
{10000000000, "10"},
|
||||
{12345678912, "12.345678912"},
|
||||
{12345678910, "12.34567891"},
|
||||
{12345678900, "12.3456789"},
|
||||
{12345678000, "12.345678"},
|
||||
{12345670000, "12.34567"},
|
||||
{12345600000, "12.3456"},
|
||||
{12345000000, "12.345"},
|
||||
{12340000000, "12.34"},
|
||||
{12300000000, "12.3"},
|
||||
{12000000000, "12"},
|
||||
{10000000000, "10"},
|
||||
{1347717491123123123, "1347717491.123123123"},
|
||||
} {
|
||||
if nsToFloatString(d.ns) != d.fs {
|
||||
t.Error("Failed", d.ns, "!=", d.fs)
|
||||
}
|
||||
if d.ns > 0 && nsToFloatString(-d.ns) != "-"+d.fs {
|
||||
t.Error("Failed on negative", d.ns, "!=", d.fs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFloatStringToNs(t *testing.T) {
|
||||
for _, d := range []struct {
|
||||
ns int64
|
||||
fs string
|
||||
}{
|
||||
{0, "0"},
|
||||
{0, "0."},
|
||||
{0, ".0"},
|
||||
{0, "0.0"},
|
||||
{0, "0.0000000001"},
|
||||
{1, "0.000000001"},
|
||||
{1000, "0.000001"},
|
||||
{1000000, "0.001"},
|
||||
{100000000, "0.1"},
|
||||
{100000000, "0.10"},
|
||||
{100000000, "0.1000000001"},
|
||||
{1000000000, "1"},
|
||||
{1000000000, "1."},
|
||||
{1000000000, "1.0"},
|
||||
{10000000000, "10"},
|
||||
{12345678912, "12.345678912"},
|
||||
{12345678912, "12.3456789129"},
|
||||
{12345678912, "12.34567891299"},
|
||||
{12345678910, "12.34567891"},
|
||||
{12345678900, "12.3456789"},
|
||||
{12345678000, "12.345678"},
|
||||
{12345670000, "12.34567"},
|
||||
{12345600000, "12.3456"},
|
||||
{12345000000, "12.345"},
|
||||
{12340000000, "12.34"},
|
||||
{12300000000, "12.3"},
|
||||
{12000000000, "12"},
|
||||
{10000000000, "10"},
|
||||
// This is a typical value which has more bits than a float64 can represent
|
||||
{1347717491123123123, "1347717491.123123123"},
|
||||
} {
|
||||
ns, err := floatStringToNs(d.fs)
|
||||
if err != nil {
|
||||
t.Error("Failed conversion", err)
|
||||
}
|
||||
if ns != d.ns {
|
||||
t.Error("Failed", d.fs, "!=", d.ns, "was", ns)
|
||||
}
|
||||
if d.ns > 0 {
|
||||
ns, err := floatStringToNs("-" + d.fs)
|
||||
if err != nil {
|
||||
t.Error("Failed conversion", err)
|
||||
}
|
||||
if ns != -d.ns {
|
||||
t.Error("Failed on negative", -d.ns, "!=", "-"+d.fs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// These are expected to produce errors
|
||||
for _, fs := range []string{
|
||||
"",
|
||||
" 1",
|
||||
"- 1",
|
||||
"- 1",
|
||||
"1.-1",
|
||||
"1.0.0",
|
||||
"1x0",
|
||||
} {
|
||||
ns, err := floatStringToNs(fs)
|
||||
if err == nil {
|
||||
t.Error("Didn't produce expected error", fs, ns)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetModTime(t *testing.T) {
|
||||
for _, d := range []struct {
|
||||
ns string
|
||||
t string
|
||||
}{
|
||||
{"1354040105", "2012-11-27T18:15:05Z"},
|
||||
{"1354040105.", "2012-11-27T18:15:05Z"},
|
||||
{"1354040105.0", "2012-11-27T18:15:05Z"},
|
||||
{"1354040105.000000000000", "2012-11-27T18:15:05Z"},
|
||||
{"1354040105.123", "2012-11-27T18:15:05.123Z"},
|
||||
{"1354040105.123456", "2012-11-27T18:15:05.123456Z"},
|
||||
{"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"},
|
||||
{"1354040105.123456789123", "2012-11-27T18:15:05.123456789Z"},
|
||||
{"0", "1970-01-01T00:00:00.000000000Z"},
|
||||
} {
|
||||
expected, err := time.Parse(time.RFC3339, d.t)
|
||||
if err != nil {
|
||||
t.Error("Bad test", err)
|
||||
}
|
||||
m := Metadata{"mtime": d.ns}
|
||||
actual, err := m.GetModTime()
|
||||
if err != nil {
|
||||
t.Error("Parse error", err)
|
||||
}
|
||||
if !actual.Equal(expected) {
|
||||
t.Error("Expecting", expected, expected.UnixNano(), "got", actual, actual.UnixNano())
|
||||
}
|
||||
}
|
||||
for _, ns := range []string{
|
||||
"EMPTY",
|
||||
"",
|
||||
" 1",
|
||||
"- 1",
|
||||
"- 1",
|
||||
"1.-1",
|
||||
"1.0.0",
|
||||
"1x0",
|
||||
} {
|
||||
m := Metadata{}
|
||||
if ns != "EMPTY" {
|
||||
m["mtime"] = ns
|
||||
}
|
||||
actual, err := m.GetModTime()
|
||||
if err == nil {
|
||||
t.Error("Expected error not produced")
|
||||
}
|
||||
if !actual.IsZero() {
|
||||
t.Error("Expected output to be zero")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetModTime(t *testing.T) {
|
||||
for _, d := range []struct {
|
||||
ns string
|
||||
t string
|
||||
}{
|
||||
{"1354040105", "2012-11-27T18:15:05Z"},
|
||||
{"1354040105", "2012-11-27T18:15:05.000000Z"},
|
||||
{"1354040105.123", "2012-11-27T18:15:05.123Z"},
|
||||
{"1354040105.123456", "2012-11-27T18:15:05.123456Z"},
|
||||
{"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"},
|
||||
{"0", "1970-01-01T00:00:00.000000000Z"},
|
||||
} {
|
||||
time, err := time.Parse(time.RFC3339, d.t)
|
||||
if err != nil {
|
||||
t.Error("Bad test", err)
|
||||
}
|
||||
m := Metadata{}
|
||||
m.SetModTime(time)
|
||||
if m["mtime"] != d.ns {
|
||||
t.Error("mtime wrong", m, "should be", d.ns)
|
||||
}
|
||||
}
|
||||
}
|
55 vendor/src/github.com/ncw/swift/notes.txt (vendored, new file)
@@ -0,0 +1,55 @@
|
|||
Notes on Go Swift
|
||||
=================
|
||||
|
||||
Make a builder style interface like the Google Go APIs? Advantages
|
||||
are that it is easy to add named methods to the service object to do
|
||||
specific things. Slightly less efficient. Not sure about how to
|
||||
return extra stuff though - in an object?
|
||||
|
||||
Make a container struct so these could be methods on it?
|
||||
|
||||
Make noResponse check for 204?
|
||||
|
||||
Make storage public so it can be extended easily?
|
||||
|
||||
Rename to go-swift to match user agent string?
|
||||
|
||||
Reconnect on auth error - 401 when token expires isn't tested
|
||||
|
||||
Make more api compatible with python cloudfiles?
|
||||
|
||||
Retry operations on timeout / network errors?
|
||||
- also 408 error
|
||||
- GET requests only?
|
||||
|
||||
Make Connection thread safe - take a write lock whenever it is changed and a read lock whenever it is read
|
||||
|
||||
Add extra headers field to Connection (for via etc)
|
||||
|
||||
Make errors use an error hierarchy so they can be caught with a type assertion
|
||||
|
||||
Error(...)
|
||||
ObjectCorrupted{ Error }
|
||||
|
||||
Make a Debug flag in connection for logging stuff
|
||||
|
||||
Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc
|
||||
|
||||
Object range
|
||||
|
||||
Object create, update with X-Delete-At or X-Delete-After
|
||||
|
||||
Large object support
|
||||
- check uploads are less than 5GB in normal mode?
|
||||
|
||||
Access control CORS?
|
||||
|
||||
Swift client retries and backs off for all types of errors
|
||||
|
||||
Implement net error interface?
|
||||
|
||||
type Error interface {
|
||||
error
|
||||
Timeout() bool // Is the error a timeout?
|
||||
Temporary() bool // Is the error temporary?
|
||||
}
|
83 vendor/src/github.com/ncw/swift/rs/rs.go (vendored, new file)
@@ -0,0 +1,83 @@
|
|||
package rs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
|
||||
// RsConnection is a RackSpace specific wrapper to the core swift library which
|
||||
// exposes the RackSpace CDN commands via the CDN Management URL interface.
|
||||
type RsConnection struct {
|
||||
swift.Connection
|
||||
cdnUrl string
|
||||
}
|
||||
|
||||
// manage is similar to the swift storage method, but uses the CDN Management URL for CDN specific calls.
|
||||
func (c *RsConnection) manage(p swift.RequestOpts) (resp *http.Response, headers swift.Headers, err error) {
|
||||
p.OnReAuth = func() (string, error) {
|
||||
if c.cdnUrl == "" {
|
||||
c.cdnUrl = c.Auth.CdnUrl()
|
||||
}
|
||||
if c.cdnUrl == "" {
|
||||
return "", errors.New("The X-CDN-Management-Url does not exist on the authenticated platform")
|
||||
}
|
||||
return c.cdnUrl, nil
|
||||
}
|
||||
if c.Authenticated() {
|
||||
_, err = p.OnReAuth()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return c.Connection.Call(c.cdnUrl, p)
|
||||
}
|
||||
|
||||
// ContainerCDNEnable enables a container for public CDN usage.
|
||||
//
|
||||
// Change the default TTL of 259200 seconds (72 hours) by passing in an integer value.
|
||||
//
|
||||
// This method can be called again to change the TTL.
|
||||
func (c *RsConnection) ContainerCDNEnable(container string, ttl int) (swift.Headers, error) {
|
||||
h := swift.Headers{"X-CDN-Enabled": "true"}
|
||||
if ttl > 0 {
|
||||
h["X-TTL"] = strconv.Itoa(ttl)
|
||||
}
|
||||
|
||||
_, headers, err := c.manage(swift.RequestOpts{
|
||||
Container: container,
|
||||
Operation: "PUT",
|
||||
ErrorMap: swift.ContainerErrorMap,
|
||||
NoResponse: true,
|
||||
Headers: h,
|
||||
})
|
||||
return headers, err
|
||||
}
|
||||
|
||||
// ContainerCDNDisable disables CDN access to a container.
|
||||
func (c *RsConnection) ContainerCDNDisable(container string) error {
|
||||
h := swift.Headers{"X-CDN-Enabled": "false"}
|
||||
|
||||
_, _, err := c.manage(swift.RequestOpts{
|
||||
Container: container,
|
||||
Operation: "PUT",
|
||||
ErrorMap: swift.ContainerErrorMap,
|
||||
NoResponse: true,
|
||||
Headers: h,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// ContainerCDNMeta returns the CDN metadata for a container.
|
||||
func (c *RsConnection) ContainerCDNMeta(container string) (swift.Headers, error) {
|
||||
_, headers, err := c.manage(swift.RequestOpts{
|
||||
Container: container,
|
||||
Operation: "HEAD",
|
||||
ErrorMap: swift.ContainerErrorMap,
|
||||
NoResponse: true,
|
||||
Headers: swift.Headers{},
|
||||
})
|
||||
return headers, err
|
||||
}
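
A hedged usage sketch of the CDN helpers above (credentials, auth URL and container name are placeholders; assumes imports of "fmt" and "github.com/ncw/swift/rs"):

// enableCDN publishes a container on the Rackspace CDN with a one-hour TTL
// instead of the 259200 second (72 hour) default. Authentication happens
// lazily on the first management call.
func enableCDN() error {
	c := rs.RsConnection{}
	c.UserName = "user"
	c.ApiKey = "key"
	c.AuthUrl = "https://auth.api.rackspacecloud.com/v1.0"
	headers, err := c.ContainerCDNEnable("public-assets", 3600)
	if err != nil {
		return err
	}
	fmt.Println("CDN URI:", headers["X-Cdn-Uri"])
	return nil
}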
|
96 vendor/src/github.com/ncw/swift/rs/rs_test.go (vendored, new file)
@@ -0,0 +1,96 @@
|
|||
// See swift_test.go for requirements to run this test.
|
||||
package rs_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/swift/rs"
|
||||
)
|
||||
|
||||
var (
|
||||
c rs.RsConnection
|
||||
)
|
||||
|
||||
const (
|
||||
CONTAINER = "GoSwiftUnitTest"
|
||||
OBJECT = "test_object"
|
||||
CONTENTS = "12345"
|
||||
CONTENT_SIZE = int64(len(CONTENTS))
|
||||
CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b"
|
||||
)
|
||||
|
||||
// Test functions are run in order - this one must be first!
|
||||
func TestAuthenticate(t *testing.T) {
|
||||
UserName := os.Getenv("SWIFT_API_USER")
|
||||
ApiKey := os.Getenv("SWIFT_API_KEY")
|
||||
AuthUrl := os.Getenv("SWIFT_AUTH_URL")
|
||||
if UserName == "" || ApiKey == "" || AuthUrl == "" {
|
||||
t.Fatal("SWIFT_API_USER, SWIFT_API_KEY and SWIFT_AUTH_URL not all set")
|
||||
}
|
||||
c = rs.RsConnection{}
|
||||
c.UserName = UserName
|
||||
c.ApiKey = ApiKey
|
||||
c.AuthUrl = AuthUrl
|
||||
err := c.Authenticate()
|
||||
if err != nil {
|
||||
t.Fatal("Auth failed", err)
|
||||
}
|
||||
if !c.Authenticated() {
|
||||
t.Fatal("Not authenticated")
|
||||
}
|
||||
}
|
||||
|
||||
// Setup
|
||||
func TestContainerCreate(t *testing.T) {
|
||||
err := c.ContainerCreate(CONTAINER, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCDNEnable(t *testing.T) {
|
||||
headers, err := c.ContainerCDNEnable(CONTAINER, 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if _, ok := headers["X-Cdn-Uri"]; !ok {
|
||||
t.Error("Failed to enable CDN for container")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOnReAuth(t *testing.T) {
|
||||
c2 := rs.RsConnection{}
|
||||
c2.UserName = c.UserName
|
||||
c2.ApiKey = c.ApiKey
|
||||
c2.AuthUrl = c.AuthUrl
|
||||
_, err := c2.ContainerCDNEnable(CONTAINER, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reauthenticate: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCDNMeta(t *testing.T) {
|
||||
headers, err := c.ContainerCDNMeta(CONTAINER)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if _, ok := headers["X-Cdn-Uri"]; !ok {
|
||||
t.Error("CDN is not enabled")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCDNDisable(t *testing.T) {
|
||||
err := c.ContainerCDNDisable(CONTAINER) // files stick in CDN until TTL expires
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Teardown
|
||||
func TestContainerDelete(t *testing.T) {
|
||||
err := c.ContainerDelete(CONTAINER)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
168 vendor/src/github.com/ncw/swift/slo.go (vendored, new file)
@@ -0,0 +1,168 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
)
|
||||
|
||||
// StaticLargeObjectCreateFile represents an open static large object
|
||||
type StaticLargeObjectCreateFile struct {
|
||||
largeObjectCreateFile
|
||||
}
|
||||
|
||||
var SLONotSupported = errors.New("SLO not supported")
|
||||
|
||||
type swiftSegment struct {
|
||||
Path string `json:"path,omitempty"`
|
||||
Etag string `json:"etag,omitempty"`
|
||||
Size int64 `json:"size_bytes,omitempty"`
|
||||
// When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes`
|
||||
// but when querying the JSON content of a manifest with the `multipart-manifest=get`
|
||||
// parameter, Swift names those attributes `name`, `hash` and `bytes`.
|
||||
// We use all the different attributes names in this structure to be able to use
|
||||
// the same structure for both uploading and retrieving.
|
||||
Name string `json:"name,omitempty"`
|
||||
Hash string `json:"hash,omitempty"`
|
||||
Bytes int64 `json:"bytes,omitempty"`
|
||||
ContentType string `json:"content_type,omitempty"`
|
||||
LastModified string `json:"last_modified,omitempty"`
|
||||
}
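// Illustrative shapes (not part of the upstream file): a manifest uploaded
// with `multipart-manifest=put` looks like
//   [{"path": "backups_segments/segments/3ab/cd/0000000000000001", "etag": "...", "size_bytes": 1048576}]
// while the same manifest fetched with `multipart-manifest=get` comes back as
//   [{"name": "/backups_segments/segments/3ab/cd/0000000000000001", "hash": "...", "bytes": 1048576}]
// which is why both sets of field names live in this single struct.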
|
||||
|
||||
// StaticLargeObjectCreateFile creates a static large object returning
|
||||
// an object which satisfies io.Writer, io.Seeker, io.Closer and
|
||||
// io.ReaderFrom. The flags are as passed to the largeObjectCreate
|
||||
// method.
|
||||
func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
|
||||
info, err := c.cachedQueryInfo()
|
||||
if err != nil || !info.SupportsSLO() {
|
||||
return nil, SLONotSupported
|
||||
}
|
||||
realMinChunkSize := info.SLOMinSegmentSize()
|
||||
if realMinChunkSize > opts.MinChunkSize {
|
||||
opts.MinChunkSize = realMinChunkSize
|
||||
}
|
||||
lo, err := c.largeObjectCreate(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return withBuffer(opts, &StaticLargeObjectCreateFile{
|
||||
largeObjectCreateFile: *lo,
|
||||
}), nil
|
||||
}
|
||||
|
||||
// StaticLargeObjectCreate creates or truncates an existing static
|
||||
// large object returning a writeable object. This sets opts.Flags to
|
||||
// an appropriate value before calling StaticLargeObjectCreateFile
|
||||
func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
|
||||
opts.Flags = os.O_TRUNC | os.O_CREATE
|
||||
return c.StaticLargeObjectCreateFile(opts)
|
||||
}
|
||||
|
||||
// StaticLargeObjectDelete deletes a static large object and all of its segments.
|
||||
func (c *Connection) StaticLargeObjectDelete(container string, path string) error {
|
||||
info, err := c.cachedQueryInfo()
|
||||
if err != nil || !info.SupportsSLO() {
|
||||
return SLONotSupported
|
||||
}
|
||||
return c.LargeObjectDelete(container, path)
|
||||
}
|
||||
|
||||
// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName
|
||||
func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
|
||||
swiftInfo, err := c.cachedQueryInfo()
|
||||
if err != nil || !swiftInfo.SupportsSLO() {
|
||||
return SLONotSupported
|
||||
}
|
||||
info, headers, err := c.Object(srcContainer, srcObjectName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createSLOManifest creates a static large object manifest
|
||||
func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object) error {
|
||||
sloSegments := make([]swiftSegment, len(segments))
|
||||
for i, segment := range segments {
|
||||
sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name)
|
||||
sloSegments[i].Etag = segment.Hash
|
||||
sloSegments[i].Size = segment.Bytes
|
||||
}
|
||||
|
||||
content, err := json.Marshal(sloSegments)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
values := url.Values{}
|
||||
values.Set("multipart-manifest", "put")
|
||||
if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, nil, values); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *StaticLargeObjectCreateFile) Close() error {
|
||||
return file.Flush()
|
||||
}
|
||||
|
||||
func (file *StaticLargeObjectCreateFile) Flush() error {
|
||||
if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments); err != nil {
|
||||
return err
|
||||
}
|
||||
return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
|
||||
}
|
||||
|
||||
func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) {
|
||||
var (
|
||||
segmentList []swiftSegment
|
||||
segments []Object
|
||||
segPath string
|
||||
segmentContainer string
|
||||
)
|
||||
|
||||
values := url.Values{}
|
||||
values.Set("multipart-manifest", "get")
|
||||
|
||||
file, _, err := c.objectOpen(container, path, true, nil, values)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
json.Unmarshal(content, &segmentList)
|
||||
for _, segment := range segmentList {
|
||||
segmentContainer, segPath = parseFullPath(segment.Name[1:])
|
||||
segments = append(segments, Object{
|
||||
Name: segPath,
|
||||
Bytes: segment.Bytes,
|
||||
Hash: segment.Hash,
|
||||
})
|
||||
}
|
||||
|
||||
return segmentContainer, segments, nil
|
||||
}
|
2053 vendor/src/github.com/ncw/swift/swift.go (vendored, new file)
File diff suppressed because it is too large
432 vendor/src/github.com/ncw/swift/swift_internal_test.go (vendored, new file)
@@ -0,0 +1,432 @@
|
|||
// This tests the swift package internals
|
||||
//
|
||||
// It does not require access to a swift server
|
||||
//
|
||||
// FIXME need to add more tests and to check URLs and parameters
|
||||
package swift
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
TEST_ADDRESS = "localhost:5324"
|
||||
AUTH_URL = "http://" + TEST_ADDRESS + "/v1.0"
|
||||
PROXY_URL = "http://" + TEST_ADDRESS + "/proxy"
|
||||
USERNAME = "test"
|
||||
APIKEY = "apikey"
|
||||
AUTH_TOKEN = "token"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
server *SwiftServer
|
||||
c *Connection
|
||||
)
|
||||
|
||||
// SwiftServer implements a test swift server
|
||||
type SwiftServer struct {
|
||||
t *testing.T
|
||||
checks []*Check
|
||||
}
|
||||
|
||||
// Used to check and reply to http transactions
|
||||
type Check struct {
|
||||
in Headers
|
||||
out Headers
|
||||
rx *string
|
||||
tx *string
|
||||
err *Error
|
||||
url *string
|
||||
}
|
||||
|
||||
// Add an in check
|
||||
func (check *Check) In(in Headers) *Check {
|
||||
check.in = in
|
||||
return check
|
||||
}
|
||||
|
||||
// Add an out check
|
||||
func (check *Check) Out(out Headers) *Check {
|
||||
check.out = out
|
||||
return check
|
||||
}
|
||||
|
||||
// Add an Error check
|
||||
func (check *Check) Error(StatusCode int, Text string) *Check {
|
||||
check.err = newError(StatusCode, Text)
|
||||
return check
|
||||
}
|
||||
|
||||
// Add a rx check
|
||||
func (check *Check) Rx(rx string) *Check {
|
||||
check.rx = &rx
|
||||
return check
|
||||
}
|
||||
|
||||
// Add a tx check
|
||||
func (check *Check) Tx(tx string) *Check {
|
||||
check.tx = &tx
|
||||
return check
|
||||
}
|
||||
|
||||
// Add a URL check
|
||||
func (check *Check) Url(url string) *Check {
|
||||
check.url = &url
|
||||
return check
|
||||
}
|
||||
|
||||
// Add a check
|
||||
func (s *SwiftServer) AddCheck(t *testing.T) *Check {
|
||||
server.t = t
|
||||
check := &Check{
|
||||
in: Headers{},
|
||||
out: Headers{},
|
||||
err: nil,
|
||||
}
|
||||
s.checks = append(s.checks, check)
|
||||
return check
|
||||
}
|
||||
|
||||
// Responds to a request
|
||||
func (s *SwiftServer) Respond(w http.ResponseWriter, r *http.Request) {
|
||||
if len(s.checks) < 1 {
|
||||
s.t.Fatal("Unexpected http transaction")
|
||||
}
|
||||
check := s.checks[0]
|
||||
s.checks = s.checks[1:]
|
||||
|
||||
// Check URL
|
||||
if check.url != nil && *check.url != r.URL.String() {
|
||||
s.t.Errorf("Expecting URL %q but got %q", *check.url, r.URL)
|
||||
}
|
||||
|
||||
// Check headers
|
||||
for k, v := range check.in {
|
||||
actual := r.Header.Get(k)
|
||||
if actual != v {
|
||||
s.t.Errorf("Expecting header %q=%q but got %q", k, v, actual)
|
||||
}
|
||||
}
|
||||
// Write output headers
|
||||
h := w.Header()
|
||||
for k, v := range check.out {
|
||||
h.Set(k, v)
|
||||
}
|
||||
// Return an error if required
|
||||
if check.err != nil {
|
||||
http.Error(w, check.err.Text, check.err.StatusCode)
|
||||
} else {
|
||||
if check.tx != nil {
|
||||
_, err := w.Write([]byte(*check.tx))
|
||||
if err != nil {
|
||||
s.t.Error("Write failed", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Checks that all responses are used up
|
||||
func (s *SwiftServer) Finished() {
|
||||
if len(s.checks) > 0 {
|
||||
s.t.Error("Unused checks", s.checks)
|
||||
}
|
||||
}
|
||||
|
||||
func handle(w http.ResponseWriter, r *http.Request) {
|
||||
// out, _ := httputil.DumpRequest(r, true)
|
||||
// os.Stdout.Write(out)
|
||||
server.Respond(w, r)
|
||||
}
|
||||
|
||||
func NewSwiftServer() *SwiftServer {
|
||||
server := &SwiftServer{}
|
||||
http.HandleFunc("/", handle)
|
||||
go http.ListenAndServe(TEST_ADDRESS, nil)
|
||||
fmt.Print("Waiting for server to start ")
|
||||
for {
|
||||
fmt.Print(".")
|
||||
conn, err := net.Dial("tcp", TEST_ADDRESS)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
fmt.Println(" Started")
|
||||
break
|
||||
}
|
||||
}
|
||||
return server
|
||||
}
|
||||
|
||||
func init() {
|
||||
server = NewSwiftServer()
|
||||
c = &Connection{
|
||||
UserName: USERNAME,
|
||||
ApiKey: APIKEY,
|
||||
AuthUrl: AUTH_URL,
|
||||
}
|
||||
}
|
||||
|
||||
// Check the error is a swift error
|
||||
func checkError(t *testing.T, err error, StatusCode int, Text string) {
|
||||
if err == nil {
|
||||
t.Fatal("No error returned")
|
||||
}
|
||||
err2, ok := err.(*Error)
|
||||
if !ok {
|
||||
t.Fatal("Bad error type")
|
||||
}
|
||||
if err2.StatusCode != StatusCode {
|
||||
t.Fatalf("Bad status code, expecting %d got %d", StatusCode, err2.StatusCode)
|
||||
}
|
||||
if err2.Text != Text {
|
||||
t.Fatalf("Bad error string, expecting %q got %q", Text, err2.Text)
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME copied from swift_test.go
|
||||
func compareMaps(t *testing.T, a, b map[string]string) {
|
||||
if len(a) != len(b) {
|
||||
t.Error("Maps different sizes", a, b)
|
||||
}
|
||||
for ka, va := range a {
|
||||
if vb, ok := b[ka]; !ok || va != vb {
|
||||
t.Error("Difference in key", ka, va, b[ka])
|
||||
}
|
||||
}
|
||||
for kb, vb := range b {
|
||||
if va, ok := a[kb]; !ok || vb != va {
|
||||
t.Error("Difference in key", kb, vb, a[kb])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalError(t *testing.T) {
|
||||
e := newError(404, "Not Found!")
|
||||
if e.StatusCode != 404 || e.Text != "Not Found!" {
|
||||
t.Fatal("Bad error")
|
||||
}
|
||||
if e.Error() != "Not Found!" {
|
||||
t.Fatal("Bad error")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func testCheckClose(rd io.ReadCloser, e error) (err error) {
|
||||
err = e
|
||||
defer checkClose(rd, &err)
|
||||
return
|
||||
}
|
||||
|
||||
// Make a closer which returns the error of our choice
|
||||
type myCloser struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *myCloser) Read([]byte) (int, error) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
func (c *myCloser) Close() error {
|
||||
return c.err
|
||||
}
|
||||
|
||||
func TestInternalCheckClose(t *testing.T) {
|
||||
if testCheckClose(&myCloser{nil}, nil) != nil {
|
||||
t.Fatal("bad 1")
|
||||
}
|
||||
if testCheckClose(&myCloser{nil}, ObjectCorrupted) != ObjectCorrupted {
|
||||
t.Fatal("bad 2")
|
||||
}
|
||||
if testCheckClose(&myCloser{ObjectNotFound}, nil) != ObjectNotFound {
|
||||
t.Fatal("bad 3")
|
||||
}
|
||||
if testCheckClose(&myCloser{ObjectNotFound}, ObjectCorrupted) != ObjectCorrupted {
|
||||
t.Fatal("bad 4")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalParseHeaders(t *testing.T) {
|
||||
resp := &http.Response{StatusCode: 200}
|
||||
if c.parseHeaders(resp, nil) != nil {
|
||||
t.Error("Bad 1")
|
||||
}
|
||||
if c.parseHeaders(resp, authErrorMap) != nil {
|
||||
t.Error("Bad 1")
|
||||
}
|
||||
|
||||
resp = &http.Response{StatusCode: 299}
|
||||
if c.parseHeaders(resp, nil) != nil {
|
||||
t.Error("Bad 1")
|
||||
}
|
||||
|
||||
resp = &http.Response{StatusCode: 199, Status: "BOOM"}
|
||||
checkError(t, c.parseHeaders(resp, nil), 199, "HTTP Error: 199: BOOM")
|
||||
|
||||
resp = &http.Response{StatusCode: 300, Status: "BOOM"}
|
||||
checkError(t, c.parseHeaders(resp, nil), 300, "HTTP Error: 300: BOOM")
|
||||
|
||||
	resp = &http.Response{StatusCode: 404, Status: "BOOM"}
	checkError(t, c.parseHeaders(resp, nil), 404, "HTTP Error: 404: BOOM")
	if c.parseHeaders(resp, ContainerErrorMap) != ContainerNotFound {
		t.Error("Bad 1")
	}
	if c.parseHeaders(resp, objectErrorMap) != ObjectNotFound {
		t.Error("Bad 1")
	}
}

func TestInternalReadHeaders(t *testing.T) {
	resp := &http.Response{Header: http.Header{}}
	compareMaps(t, readHeaders(resp), Headers{})

	resp = &http.Response{Header: http.Header{
		"one": []string{"1"},
		"two": []string{"2"},
	}}
	compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"})

	// FIXME this outputs a log which we should test and check
	resp = &http.Response{Header: http.Header{
		"one": []string{"1", "11", "111"},
		"two": []string{"2"},
	}}
	compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"})
}

func TestInternalStorage(t *testing.T) {
	// FIXME
}

// ------------------------------------------------------------

func TestInternalAuthenticate(t *testing.T) {
	server.AddCheck(t).In(Headers{
		"User-Agent":  DefaultUserAgent,
		"X-Auth-Key":  APIKEY,
		"X-Auth-User": USERNAME,
	}).Out(Headers{
		"X-Storage-Url": PROXY_URL,
		"X-Auth-Token":  AUTH_TOKEN,
	}).Url("/v1.0")
	defer server.Finished()

	err := c.Authenticate()
	if err != nil {
		t.Fatal(err)
	}
	if c.StorageUrl != PROXY_URL {
		t.Error("Bad storage url")
	}
	if c.AuthToken != AUTH_TOKEN {
		t.Error("Bad auth token")
	}
	if !c.Authenticated() {
		t.Error("Didn't authenticate")
	}
}

func TestInternalAuthenticateDenied(t *testing.T) {
	server.AddCheck(t).Error(400, "Bad request")
	server.AddCheck(t).Error(401, "DENIED")
	defer server.Finished()
	c.UnAuthenticate()
	err := c.Authenticate()
	if err != AuthorizationFailed {
		t.Fatal("Expecting AuthorizationFailed", err)
	}
	// FIXME
	// if c.Authenticated() {
	// 	t.Fatal("Expecting not authenticated")
	// }
}

func TestInternalAuthenticateBad(t *testing.T) {
	server.AddCheck(t).Out(Headers{
		"X-Storage-Url": PROXY_URL,
	})
	defer server.Finished()
	err := c.Authenticate()
	checkError(t, err, 0, "Response didn't have storage url and auth token")
	if c.Authenticated() {
		t.Fatal("Expecting not authenticated")
	}

	server.AddCheck(t).Out(Headers{
		"X-Auth-Token": AUTH_TOKEN,
	})
	err = c.Authenticate()
	checkError(t, err, 0, "Response didn't have storage url and auth token")
	if c.Authenticated() {
		t.Fatal("Expecting not authenticated")
	}

	server.AddCheck(t)
	err = c.Authenticate()
	checkError(t, err, 0, "Response didn't have storage url and auth token")
	if c.Authenticated() {
		t.Fatal("Expecting not authenticated")
	}

	server.AddCheck(t).Out(Headers{
		"X-Storage-Url": PROXY_URL,
		"X-Auth-Token":  AUTH_TOKEN,
	})
	err = c.Authenticate()
	if err != nil {
		t.Fatal(err)
	}
	if !c.Authenticated() {
		t.Fatal("Expecting authenticated")
	}
}

func testContainerNames(t *testing.T, rx string, expected []string) {
	server.AddCheck(t).In(Headers{
		"User-Agent":   DefaultUserAgent,
		"X-Auth-Token": AUTH_TOKEN,
	}).Tx(rx).Url("/proxy")
	containers, err := c.ContainerNames(nil)
	if err != nil {
		t.Fatal(err)
	}
	if len(containers) != len(expected) {
		t.Fatal("Wrong number of containers", len(containers), rx, len(expected), expected)
	}
	for i := range containers {
		if containers[i] != expected[i] {
			t.Error("Bad container", containers[i], expected[i])
		}
	}
}

func TestInternalContainerNames(t *testing.T) {
	defer server.Finished()
	testContainerNames(t, "", []string{})
	testContainerNames(t, "one", []string{"one"})
	testContainerNames(t, "one\n", []string{"one"})
	testContainerNames(t, "one\ntwo\nthree\n", []string{"one", "two", "three"})
}

func TestInternalObjectPutBytes(t *testing.T) {
	server.AddCheck(t).In(Headers{
		"User-Agent":     DefaultUserAgent,
		"X-Auth-Token":   AUTH_TOKEN,
		"Content-Length": "5",
		"Content-Type":   "text/plain",
	}).Rx("12345")
	defer server.Finished()
	c.ObjectPutBytes("container", "object", []byte{'1', '2', '3', '4', '5'}, "text/plain")
}

func TestInternalObjectPutString(t *testing.T) {
	server.AddCheck(t).In(Headers{
		"User-Agent":     DefaultUserAgent,
		"X-Auth-Token":   AUTH_TOKEN,
		"Content-Length": "5",
		"Content-Type":   "text/plain",
	}).Rx("12345")
	defer server.Finished()
	c.ObjectPutString("container", "object", "12345", "text/plain")
}
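The internal tests above exercise `Connection.Authenticate`, `ContainerNames` and the `ObjectPut*` helpers against the local mock server. As a rough orientation only, the sketch below shows how the same calls might be made against a real Swift endpoint using v1 auth; the credentials, URL and container name are placeholders, not values taken from this change.

.. code-block:: go

    package main

    import (
        "log"

        "github.com/ncw/swift"
    )

    func main() {
        // Placeholder credentials; a real deployment would take these from
        // the environment (ST_AUTH/ST_USER/ST_KEY or the OS_* variables).
        c := swift.Connection{
            UserName: "demo",
            ApiKey:   "secret",
            AuthUrl:  "https://swift.example.com/v1.0",
        }

        // Obtain the storage URL and auth token, as TestInternalAuthenticate
        // does against the mock server.
        if err := c.Authenticate(); err != nil {
            log.Fatal(err)
        }

        // Upload a small object with an explicit Content-Type, mirroring
        // TestInternalObjectPutString.
        if err := c.ObjectPutString("container", "object", "12345", "text/plain"); err != nil {
            log.Fatal(err)
        }
    }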
2891  vendor/src/github.com/ncw/swift/swift_test.go  vendored  Normal file
File diff suppressed because it is too large

1094  vendor/src/github.com/ncw/swift/swifttest/server.go  vendored  Normal file
File diff suppressed because it is too large

57  vendor/src/github.com/ncw/swift/timeout_reader.go  vendored  Normal file
@@ -0,0 +1,57 @@
package swift

import (
	"io"
	"time"
)

// An io.ReadCloser which obeys an idle timeout
type timeoutReader struct {
	reader  io.ReadCloser
	timeout time.Duration
	cancel  func()
}

// Returns a wrapper around the reader which obeys an idle
// timeout. The cancel function is called if the timeout happens
func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
	return &timeoutReader{
		reader:  reader,
		timeout: timeout,
		cancel:  cancel,
	}
}

// Read reads up to len(p) bytes into p
//
// Waits at most for timeout for the read to complete otherwise returns a timeout
func (t *timeoutReader) Read(p []byte) (int, error) {
	// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
	// Do the read in the background
	type result struct {
		n   int
		err error
	}
	done := make(chan result, 1)
	go func() {
		n, err := t.reader.Read(p)
		done <- result{n, err}
	}()
	// Wait for the read or the timeout
	select {
	case r := <-done:
		return r.n, r.err
	case <-time.After(t.timeout):
		t.cancel()
		return 0, TimeoutError
	}
	panic("unreachable") // for Go 1.0
}

// Close the channel
func (t *timeoutReader) Close() error {
	return t.reader.Close()
}

// Check it satisfies the interface
var _ io.ReadCloser = &timeoutReader{}
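A short usage sketch may help here (assumed, not taken from the diff): an HTTP response body can be wrapped in a timeoutReader so that a download which stalls longer than the idle timeout cancels the request and surfaces TimeoutError to the caller. The helper name and the 60-second value are illustrative only.

.. code-block:: go

    package swift

    import (
        "io"
        "net/http"
        "time"
    )

    // openWithIdleTimeout is a hypothetical helper, not part of the library.
    func openWithIdleTimeout(client *http.Client, url string) (io.ReadCloser, error) {
        resp, err := client.Get(url)
        if err != nil {
            return nil, err
        }
        cancel := func() {
            // Called by timeoutReader when the idle timeout fires; closing the
            // body unblocks the pending Read and releases the connection.
            resp.Body.Close()
        }
        return newTimeoutReader(resp.Body, 60*time.Second, cancel), nil
    }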
107  vendor/src/github.com/ncw/swift/timeout_reader_test.go  vendored  Normal file
@@ -0,0 +1,107 @@
// This tests TimeoutReader

package swift

import (
	"io"
	"io/ioutil"
	"sync"
	"testing"
	"time"
)

// An io.ReadCloser for testing
type testReader struct {
	sync.Mutex
	n      int
	delay  time.Duration
	closed bool
}

// Returns n bytes with a time.Duration delay between each
func newTestReader(n int, delay time.Duration) *testReader {
	return &testReader{
		n:     n,
		delay: delay,
	}
}

// Returns 1 byte at a time after delay
func (t *testReader) Read(p []byte) (n int, err error) {
	if t.n <= 0 {
		return 0, io.EOF
	}
	time.Sleep(t.delay)
	p[0] = 'A'
	t.Lock()
	t.n--
	t.Unlock()
	return 1, nil
}

// Close the channel
func (t *testReader) Close() error {
	t.Lock()
	t.closed = true
	t.Unlock()
	return nil
}

func TestTimeoutReaderNoTimeout(t *testing.T) {
	test := newTestReader(3, 10*time.Millisecond)
	cancelled := false
	cancel := func() {
		cancelled = true
	}
	tr := newTimeoutReader(test, 100*time.Millisecond, cancel)
	b, err := ioutil.ReadAll(tr)
	if err != nil || string(b) != "AAA" {
		t.Fatalf("Bad read %s %s", err, b)
	}
	if cancelled {
		t.Fatal("Cancelled when shouldn't have been")
	}
	if test.n != 0 {
		t.Fatal("Didn't read all")
	}
	if test.closed {
		t.Fatal("Shouldn't be closed")
	}
	tr.Close()
	if !test.closed {
		t.Fatal("Should be closed")
	}
}

func TestTimeoutReaderTimeout(t *testing.T) {
	// Return those bytes slowly so we get an idle timeout
	test := newTestReader(3, 100*time.Millisecond)
	cancelled := false
	cancel := func() {
		cancelled = true
	}
	tr := newTimeoutReader(test, 10*time.Millisecond, cancel)
	_, err := ioutil.ReadAll(tr)
	if err != TimeoutError {
		t.Fatal("Expecting TimeoutError, got", err)
	}
	if !cancelled {
		t.Fatal("Not cancelled when should have been")
	}
	test.Lock()
	n := test.n
	test.Unlock()
	if n == 0 {
		t.Fatal("Read all")
	}
	if n != 3 {
		t.Fatal("Didn't read any")
	}
	if test.closed {
		t.Fatal("Shouldn't be closed")
	}
	tr.Close()
	if !test.closed {
		t.Fatal("Should be closed")
	}
}
22  vendor/src/github.com/ncw/swift/travis_realserver.sh  vendored  Normal file
@@ -0,0 +1,22 @@
#!/bin/bash
set -e

if [ ! "${TRAVIS_BRANCH}" = "master" ]; then
	exit 0
fi

if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then
	echo "Running tests pointing to Rackspace"
	export SWIFT_API_KEY=$RACKSPACE_APIKEY
	export SWIFT_API_USER=$RACKSPACE_USER
	export SWIFT_AUTH_URL=$RACKSPACE_AUTH
	go test ./...
fi

if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! -z "${MEMSET_APIKEY}" ]; then
	echo "Running tests pointing to Memset"
	export SWIFT_API_KEY=$MEMSET_APIKEY
	export SWIFT_API_USER=$MEMSET_USER
	export SWIFT_AUTH_URL=$MEMSET_AUTH
	go test
fi
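The SWIFT_* variables exported above are presumably consumed by the test suite when it builds its Connection; the exact wiring lives in swift_test.go, whose diff is suppressed, so the following Go sketch is an assumption about that mapping rather than code from this change.

.. code-block:: go

    package swift

    import "os"

    // connectionFromEnv is a hypothetical illustration of how the real-server
    // tests could pick up the variables exported by travis_realserver.sh.
    func connectionFromEnv() Connection {
        return Connection{
            UserName: os.Getenv("SWIFT_API_USER"),
            ApiKey:   os.Getenv("SWIFT_API_KEY"),
            AuthUrl:  os.Getenv("SWIFT_AUTH_URL"),
        }
    }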
55  vendor/src/github.com/ncw/swift/watchdog_reader.go  vendored  Normal file
@@ -0,0 +1,55 @@
package swift

import (
	"io"
	"time"
)

var watchdogChunkSize = 1 << 20 // 1 MiB

// An io.Reader which resets a watchdog timer whenever data is read
type watchdogReader struct {
	timeout   time.Duration
	reader    io.Reader
	timer     *time.Timer
	chunkSize int
}

// Returns a new reader which will kick the watchdog timer whenever data is read
func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader {
	return &watchdogReader{
		timeout:   timeout,
		reader:    reader,
		timer:     timer,
		chunkSize: watchdogChunkSize,
	}
}

// Read reads up to len(p) bytes into p
func (t *watchdogReader) Read(p []byte) (int, error) {
	// read from underlying reader in chunks not larger than t.chunkSize
	// while resetting the watchdog timer before every read; the small chunk
	// size ensures that the timer does not fire when reading a large amount of
	// data from a slow connection
	start := 0
	end := len(p)
	for start < end {
		length := end - start
		if length > t.chunkSize {
			length = t.chunkSize
		}

		resetTimer(t.timer, t.timeout)
		n, err := t.reader.Read(p[start : start+length])
		start += n
		if n == 0 || err != nil {
			return start, err
		}
	}

	resetTimer(t.timer, t.timeout)
	return start, nil
}

// Check it satisfies the interface
var _ io.Reader = &watchdogReader{}
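To make the intended use concrete, here is an assumed sketch of wrapping an upload body in a watchdogReader so that a request-level timer is pushed back whenever data actually moves, instead of firing in the middle of a long but healthy upload. The helper, the locally created timer and the 60-second value are illustrative; the library's real plumbing around resetTimer is not shown in this file.

.. code-block:: go

    package swift

    import (
        "os"
        "time"
    )

    // uploadWithWatchdog is a hypothetical helper, not part of the library.
    func uploadWithWatchdog(c *Connection, container, object string, f *os.File) error {
        // In the library the timer guards the whole HTTP request; here it is
        // created locally just for the sketch.
        timer := time.NewTimer(60 * time.Second)
        defer timer.Stop()

        wr := newWatchdogReader(f, 60*time.Second, timer)
        _, err := c.ObjectPut(container, object, wr, true, "", "application/octet-stream", nil)
        return err
    }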
137  vendor/src/github.com/ncw/swift/watchdog_reader_test.go  vendored  Normal file
@@ -0,0 +1,137 @@
// This tests WatchdogReader

package swift

import (
	"bytes"
	"io"
	"io/ioutil"
	"testing"
	"time"
)

// Uses testReader from timeout_reader_test.go

func testWatchdogReaderTimeout(t *testing.T, initialTimeout, watchdogTimeout time.Duration, expectedTimeout bool) {
	test := newTestReader(3, 10*time.Millisecond)
	timer, firedChan := setupTimer(initialTimeout)
	wr := newWatchdogReader(test, watchdogTimeout, timer)
	b, err := ioutil.ReadAll(wr)
	if err != nil || string(b) != "AAA" {
		t.Fatalf("Bad read %s %s", err, b)
	}
	checkTimer(t, firedChan, expectedTimeout)
}

func setupTimer(initialTimeout time.Duration) (timer *time.Timer, fired <-chan bool) {
	timer = time.NewTimer(initialTimeout)
	firedChan := make(chan bool)
	started := make(chan bool)
	go func() {
		started <- true
		select {
		case <-timer.C:
			firedChan <- true
		}
	}()
	<-started
	return timer, firedChan
}

func checkTimer(t *testing.T, firedChan <-chan bool, expectedTimeout bool) {
	fired := false
	select {
	case fired = <-firedChan:
	default:
	}
	if expectedTimeout {
		if !fired {
			t.Fatal("Timer should have fired")
		}
	} else {
		if fired {
			t.Fatal("Timer should not have fired")
		}
	}
}

func TestWatchdogReaderNoTimeout(t *testing.T) {
	testWatchdogReaderTimeout(t, 100*time.Millisecond, 100*time.Millisecond, false)
}

func TestWatchdogReaderTimeout(t *testing.T) {
	testWatchdogReaderTimeout(t, 5*time.Millisecond, 5*time.Millisecond, true)
}

func TestWatchdogReaderNoTimeoutShortInitial(t *testing.T) {
	testWatchdogReaderTimeout(t, 5*time.Millisecond, 100*time.Millisecond, false)
}

func TestWatchdogReaderTimeoutLongInitial(t *testing.T) {
	testWatchdogReaderTimeout(t, 100*time.Millisecond, 5*time.Millisecond, true)
}

// slowReader simulates reading from a slow network connection by introducing a delay
// in each Read() proportional to the amount of bytes read.
type slowReader struct {
	reader       io.Reader
	delayPerByte time.Duration
}

func (r *slowReader) Read(p []byte) (n int, err error) {
	n, err = r.reader.Read(p)
	if n > 0 {
		time.Sleep(time.Duration(n) * r.delayPerByte)
	}
	return
}

// This test verifies that the watchdogReader's timeout is not triggered by data
// that comes in very slowly. (It should only be triggered if no data arrives at
// all.)
func TestWatchdogReaderOnSlowNetwork(t *testing.T) {
	byteString := make([]byte, 8*watchdogChunkSize)
	reader := &slowReader{
		reader: bytes.NewReader(byteString),
		// reading everything at once would take 200 ms, which is longer than the
		// watchdog timeout below
		delayPerByte: 200 * time.Millisecond / time.Duration(len(byteString)),
	}

	timer, firedChan := setupTimer(10 * time.Millisecond)
	wr := newWatchdogReader(reader, 190*time.Millisecond, timer)

	// use io.ReadFull instead of ioutil.ReadAll here because ReadAll already does
	// some chunking that would keep this testcase from failing
	b := make([]byte, len(byteString))
	n, err := io.ReadFull(wr, b)
	if err != nil || n != len(b) || !bytes.Equal(b, byteString) {
		t.Fatalf("Bad read %s %d", err, n)
	}

	checkTimer(t, firedChan, false)
}

// This test verifies that the watchdogReader's chunking logic does not mess up
// the byte strings that are read.
func TestWatchdogReaderValidity(t *testing.T) {
	byteString := []byte("abcdefghij")
	// make a reader with a non-standard chunk size (1 MiB would be much too huge
	// to comfortably look at the bytestring that comes out of the reader)
	wr := &watchdogReader{
		reader:    bytes.NewReader(byteString),
		chunkSize: 3, // len(byteString) % chunkSize != 0 to be extra rude :)
		// don't care about the timeout stuff here
		timeout: 5 * time.Minute,
		timer:   time.NewTimer(5 * time.Minute),
	}

	b := make([]byte, len(byteString))
	n, err := io.ReadFull(wr, b)
	if err != nil || n != len(b) {
		t.Fatalf("Read error: %s", err)
	}
	if !bytes.Equal(b, byteString) {
		t.Fatalf("Bad read: %#v != %#v", string(b), string(byteString))
	}
}