forked from TrueCloudLab/rclone
Switch to using the dep tool and update all the dependencies
This commit is contained in:
parent
5135ff73cb
commit
98c2d2c41b
5321 changed files with 4483201 additions and 5922 deletions
115
vendor/github.com/ncw/go-acd/account_test.go
generated
vendored
Normal file
115
vendor/github.com/ncw/go-acd/account_test.go
generated
vendored
Normal file
|
@ -0,0 +1,115 @@
|
|||
// Copyright (c) 2015 Serge Gebhardt. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by the ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acd
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAccount_getInfo(t *testing.T) {
|
||||
r := *NewMockResponseOkString(`{ "termsOfUse": "1.0.0", "status": "ACTIVE" }`)
|
||||
c := NewMockClient(r)
|
||||
|
||||
info, _, err := c.Account.GetInfo()
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "ACTIVE", *info.Status)
|
||||
assert.Equal(t, "1.0.0", *info.TermsOfUse)
|
||||
}
|
||||
|
||||
func TestAccount_getQuota(t *testing.T) {
|
||||
r := *NewMockResponseOkString(`
|
||||
{
|
||||
"quota": 5368709120,
|
||||
"lastCalculated": "2014-08-13T23:01:47.479Z",
|
||||
"available": 4069088896
|
||||
}
|
||||
`)
|
||||
c := NewMockClient(r)
|
||||
|
||||
quota, _, err := c.Account.GetQuota()
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "2014-08-13 23:01:47.479 +0000 UTC", quota.LastCalculated.String())
|
||||
assert.Equal(t, uint64(5368709120), *quota.Quota)
|
||||
assert.Equal(t, uint64(4069088896), *quota.Available)
|
||||
}
|
||||
|
||||
// TestAccount_getUsage checks that Account.GetUsage decodes the
// per-category (other/doc/photo/video) "total" and "billable"
// byte/count pairs from the service response.
func TestAccount_getUsage(t *testing.T) {
	// Canned usage document with all four media categories populated.
	r := *NewMockResponseOkString(`
{
	"lastCalculated":"2014-08-13T23:17:41.365Z",
	"other":{
		"total":{
			"bytes":29999771,
			"count":871
		},
		"billable":{
			"bytes":29999771,
			"count":871
		}
	},
	"doc":{
		"total":{
			"bytes":807170,
			"count":10
		},
		"billable":{
			"bytes":807170,
			"count":10
		}
	},
	"photo":{
		"total":{
			"bytes":9477988,
			"count":25
		},
		"billable":{
			"bytes":9477988,
			"count":25
		}
	},
	"video":{
		"total":{
			"bytes":23524252,
			"count":22
		},
		"billable":{
			"bytes":23524252,
			"count":22
		}
	}
}
`)
	c := NewMockClient(r)

	usage, _, err := c.Account.GetUsage()

	assert.NoError(t, err)
	// Expected string is time.Time's default String() rendering.
	assert.Equal(t, "2014-08-13 23:17:41.365 +0000 UTC", usage.LastCalculated.String())

	assert.Equal(t, uint64(29999771), *usage.Other.Total.Bytes)
	assert.Equal(t, uint64(871), *usage.Other.Total.Count)
	assert.Equal(t, uint64(29999771), *usage.Other.Billable.Bytes)
	assert.Equal(t, uint64(871), *usage.Other.Billable.Count)

	assert.Equal(t, uint64(807170), *usage.Doc.Total.Bytes)
	assert.Equal(t, uint64(10), *usage.Doc.Total.Count)
	assert.Equal(t, uint64(807170), *usage.Doc.Billable.Bytes)
	assert.Equal(t, uint64(10), *usage.Doc.Billable.Count)

	assert.Equal(t, uint64(9477988), *usage.Photo.Total.Bytes)
	assert.Equal(t, uint64(25), *usage.Photo.Total.Count)
	assert.Equal(t, uint64(9477988), *usage.Photo.Billable.Bytes)
	assert.Equal(t, uint64(25), *usage.Photo.Billable.Count)

	assert.Equal(t, uint64(23524252), *usage.Video.Total.Bytes)
	assert.Equal(t, uint64(22), *usage.Video.Total.Count)
	assert.Equal(t, uint64(23524252), *usage.Video.Billable.Bytes)
	assert.Equal(t, uint64(22), *usage.Video.Billable.Count)
}
|
56
vendor/github.com/ncw/go-acd/client_mock_test.go
generated
vendored
Normal file
56
vendor/github.com/ncw/go-acd/client_mock_test.go
generated
vendored
Normal file
|
@ -0,0 +1,56 @@
|
|||
// Copyright (c) 2015 Serge Gebhardt. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by the ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// MockResponse is a static HTTP response.
type MockResponse struct {
	Code int    // HTTP status code to return
	Body []byte // response body bytes (may be empty)
}
|
||||
|
||||
// NewMockResponseOkString creates a new MockResponse with Code 200 (OK)
|
||||
// and Body built from string argument
|
||||
func NewMockResponseOkString(response string) *MockResponse {
|
||||
return &MockResponse{
|
||||
Code: 200,
|
||||
Body: []byte(response),
|
||||
}
|
||||
}
|
||||
|
||||
// mockTransport is a mocked Transport that always returns the same MockResponse.
type mockTransport struct {
	resp MockResponse // the canned response served for every request
}
|
||||
|
||||
// Satisfies the RoundTripper interface.
|
||||
func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
r := http.Response{
|
||||
StatusCode: t.resp.Code,
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
}
|
||||
|
||||
if len(t.resp.Body) > 0 {
|
||||
buf := bytes.NewBuffer(t.resp.Body)
|
||||
r.Body = ioutil.NopCloser(buf)
|
||||
}
|
||||
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
// NewMockClient returns a Client whose HTTP transport always replies
// with the given canned response. Used only in tests.
func NewMockClient(response MockResponse) *Client {
	t := &mockTransport{resp: response}
	c := &http.Client{Transport: t}
	return NewClient(c)
}
|
157
vendor/github.com/ncw/go-acd/nodes_test.go
generated
vendored
Normal file
157
vendor/github.com/ncw/go-acd/nodes_test.go
generated
vendored
Normal file
|
@ -0,0 +1,157 @@
|
|||
// Copyright (c) 2015 Serge Gebhardt. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by the ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acd
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestNode_getRoot checks that Nodes.GetRoot picks the node flagged
// "isRoot" out of the listing and that it has no name.
func TestNode_getRoot(t *testing.T) {
	r := *NewMockResponseOkString(`
{
	"count":1,
	"data":[
		{
			"isRoot":true,
			"eTagResponse":"LMel82iwbNk",
			"id":"3ohaT2SSQWOecmP0GSWv6g",
			"kind":"FOLDER",
			"version":156,
			"labels":[

			],
			"createdDate":"2014-04-08T20:58:58.011Z",
			"createdBy":"CloudDriveFiles",
			"restricted":false,
			"modifiedDate":"2015-05-03T16:12:35.394Z",
			"isShared":false,
			"parents":[

			],
			"status":"AVAILABLE"
		}
	]
}
`)
	c := NewMockClient(r)

	root, _, err := c.Nodes.GetRoot()

	assert.NoError(t, err)
	assert.Equal(t, "3ohaT2SSQWOecmP0GSWv6g", *root.Id)
	// The root folder carries no "name" key, so the pointer stays nil.
	assert.Nil(t, root.Name)
}
|
||||
|
||||
// TestNode_getNodes checks that Nodes.GetNodes decodes a two-entry
// listing and stores the server's "nextToken" back into the options
// for pagination.
func TestNode_getNodes(t *testing.T) {
	r := *NewMockResponseOkString(`
{
	"count":2,
	"nextToken":"kgkbpodpt6",
	"data":[
		{
			"eTagResponse":"eodh1-sfNbMI",
			"id":"eRkZ6YMuX5W3VqV3Ia7_lf",
			"name":"fooNew.jpg",
			"kind":"FILE",
			"metadataVersion":1,
			"modifiedDate":"2014-03-07T22:31:12.173Z",
			"creationDate":"2014-03-07T22:31:12.173Z",
			"labels":[
				"PHOTO"
			],
			"description":"My Awesome Photo",
			"createdBy":"ApplicationId1",
			"parents":[
				"foo1",
				"123"
			],
			"status":"Available",
			"restricted":false,
			"size":56654,
			"contentType":"image/jpeg",
			"md5":"6df23dc03f9b54cc38a0fc1483df6e21",
			"fileExtension":"jpeg",
			"contentProperties":{
				"image":{
					"make":"SAMSUNG",
					"model":"SAMSUNG-SGH-I747",
					"exposureTime":"1/1780",
					"dateTimeOriginal":"2012-08-25T14:23:24.000Z",
					"flash":"No",
					"focalLength":"37/10",
					"dateTime":"2012-08-25T14:23:24.000Z",
					"dateTimeDigitized":"2012-08-25T14:23:24.000Z",
					"software":"I747UCALG1",
					"orientation":"1",
					"colorSpace":"sRGB",
					"meteringMode":"CenterWeightedAverage",
					"exposureProgram":"Aperture Priority",
					"exposureMode":"Auto Exposure",
					"whiteBalance":"Auto",
					"sensingMethod":"One-chip color area",
					"xResolution":"72",
					"yResolution":"72",
					"resolutionUnit":"Pixels/Inch"
				}
			}
		},
		{
			"eTagResponse":"sdgrrtbbfdd",
			"id":"fooo1",
			"name":"foo.zip",
			"kind":"FILE",
			"metadataVersion":1,
			"modifiedDate":"2014-03-07T22:31:12.173Z",
			"creationDate":"2014-03-07T22:31:12.173Z",
			"labels":[
				"ZIP File"
			],
			"description":"All My Data",
			"createdBy":"ApplicationId2",
			"status":"Available",
			"restricted":false,
			"size":5665423,
			"contentType":"application/octet-stream",
			"md5":"6df23dc03f9b54cc38a0fc1483df6e23",
			"fileExtension":"zip"
		}
	]
}
`)
	c := NewMockClient(r)
	opts := &NodeListOptions{}

	nodes, _, err := c.Nodes.GetNodes(opts)

	assert.NoError(t, err)
	// GetNodes copies the server's nextToken into opts for the next page.
	assert.Equal(t, "kgkbpodpt6", opts.StartToken)
	assert.Equal(t, 2, len(nodes))

	assert.Equal(t, "eRkZ6YMuX5W3VqV3Ia7_lf", *nodes[0].Id)
	assert.Equal(t, "fooNew.jpg", *nodes[0].Name)

	assert.Equal(t, "fooo1", *nodes[1].Id)
	assert.Equal(t, "foo.zip", *nodes[1].Name)
}
|
||||
|
||||
func TestEscapeForFilter(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want string
|
||||
}{
|
||||
{"", ""},
|
||||
{"potato", "potato"},
|
||||
{`potato+sausage`, `potato\+sausage`},
|
||||
{`+ - & | ! ( ) { } [ ] ^ ' " ~ * ? : \`, `\+\ \-\ \&\ \|\ \!\ \(\ \)\ \{\ \}\ \[\ \]\ \^\ \'\ \"\ \~\ \*\ \?\ \:\ \\`},
|
||||
} {
|
||||
got := EscapeForFilter(test.in)
|
||||
if test.want != got {
|
||||
t.Errorf("in(%q): want '%s' got '%s'", test.in, test.want, got)
|
||||
}
|
||||
}
|
||||
}
|
22
vendor/github.com/ncw/swift/.travis.yml
generated
vendored
22
vendor/github.com/ncw/swift/.travis.yml
generated
vendored
|
@ -5,10 +5,26 @@ go:
|
|||
- 1.1.2
|
||||
- 1.2.2
|
||||
- 1.3.3
|
||||
- 1.4.2
|
||||
- 1.5.1
|
||||
- tip
|
||||
- 1.4.3
|
||||
- 1.5.4
|
||||
- 1.6.4
|
||||
- 1.7.5
|
||||
- 1.8
|
||||
- master
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.8
|
||||
env: TEST_REAL_SERVER=rackspace
|
||||
- go: 1.8
|
||||
env: TEST_REAL_SERVER=memset
|
||||
allow_failures:
|
||||
- go: 1.8
|
||||
env: TEST_REAL_SERVER=rackspace
|
||||
- go: 1.8
|
||||
env: TEST_REAL_SERVER=memset
|
||||
install: go test -i ./...
|
||||
script:
|
||||
- test -z "$(go fmt ./...)"
|
||||
- go test
|
||||
- ./travis_realserver.sh
|
||||
|
|
136
vendor/github.com/ncw/swift/dlo.go
generated
vendored
Normal file
136
vendor/github.com/ncw/swift/dlo.go
generated
vendored
Normal file
|
@ -0,0 +1,136 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// DynamicLargeObjectCreateFile represents an open dynamic large object
type DynamicLargeObjectCreateFile struct {
	largeObjectCreateFile
}
|
||||
|
||||
// DynamicLargeObjectCreateFile creates a dynamic large object
// returning an object which satisfies io.Writer, io.Seeker, io.Closer
// and io.ReaderFrom. The flags are as passed to the
// largeObjectCreate method.
func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
	lo, err := c.largeObjectCreate(opts)
	if err != nil {
		return nil, err
	}

	// withBuffer may wrap the file with write buffering depending on
	// opts.NoBuffer — see withBuffer.
	return withBuffer(opts, &DynamicLargeObjectCreateFile{
		largeObjectCreateFile: *lo,
	}), nil
}
|
||||
|
||||
// DynamicLargeObjectCreate creates or truncates an existing dynamic
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling DynamicLargeObjectCreateFile
func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
	// Always create/truncate: any existing content is discarded.
	opts.Flags = os.O_TRUNC | os.O_CREATE
	return c.DynamicLargeObjectCreateFile(opts)
}
|
||||
|
||||
// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments.
func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
	// LargeObjectDelete handles both DLOs and SLOs.
	return c.LargeObjectDelete(container, path)
}
|
||||
|
||||
// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName
|
||||
func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
|
||||
info, headers, err := c.Object(dstContainer, srcObjectName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
|
||||
if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createDLOManifest creates a dynamic large object manifest
|
||||
func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string) error {
|
||||
headers := make(Headers)
|
||||
headers["X-Object-Manifest"] = prefix
|
||||
manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := manifest.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close satisfies the io.Closer interface
//
// Closing flushes the manifest; see Flush.
func (file *DynamicLargeObjectCreateFile) Close() error {
	return file.Flush()
}
|
||||
|
||||
// Flush rewrites the DLO manifest for the data written so far and then
// waits until a read of the object reports the expected size (the
// container listing backing a DLO is only eventually consistent).
func (file *DynamicLargeObjectCreateFile) Flush() error {
	err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType)
	if err != nil {
		return err
	}
	return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
}
|
||||
|
||||
// getAllDLOSegments returns every segment object of a dynamic large
// object, compensating for eventually-consistent container listings by
// HEADing any sequentially-numbered segment missing from the listing.
func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) {
	//a simple container listing works 99.9% of the time
	segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath})
	if err != nil {
		return nil, err
	}

	// Index the listed segment names for quick membership checks.
	hasObjectName := make(map[string]struct{})
	for _, segment := range segments {
		hasObjectName[segment.Name] = struct{}{}
	}

	//The container listing might be outdated (i.e. not contain all existing
	//segment objects yet) because of temporary inconsistency (Swift is only
	//eventually consistent!). Check its completeness.
	segmentNumber := 0
	for {
		segmentNumber++
		segmentName := getSegment(segmentPath, segmentNumber)
		if _, seen := hasObjectName[segmentName]; seen {
			continue
		}

		//This segment is missing in the container listing. Use a more reliable
		//request to check its existence. (HEAD requests on segments are
		//guaranteed to return the correct metadata, except for the pathological
		//case of an outage of large parts of the Swift cluster or its network,
		//since every segment is only written once.)
		segment, _, err := c.Object(segmentContainer, segmentName)
		switch err {
		case nil:
			//found new segment -> add it in the correct position and keep
			//going, more might be missing
			if segmentNumber <= len(segments) {
				// Insert at index segmentNumber-1, shifting the tail right:
				// the append duplicates the element at segmentNumber-1 and
				// the assignment below overwrites the first copy.
				segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...)
				segments[segmentNumber-1] = segment
			} else {
				segments = append(segments, segment)
			}
			continue
		case ObjectNotFound:
			//This segment is missing. Since we upload segments sequentially,
			//there won't be any more segments after it.
			return segments, nil
		default:
			return nil, err //unexpected error
		}
	}
}
|
109
vendor/github.com/ncw/swift/example_test.go
generated
vendored
Normal file
109
vendor/github.com/ncw/swift/example_test.go
generated
vendored
Normal file
|
@ -0,0 +1,109 @@
|
|||
// Copyright...
|
||||
|
||||
// This example demonstrates opening a Connection and doing some basic operations.
|
||||
package swift_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
|
||||
// ExampleConnection shows how to build and authenticate a connection
// using v1 auth, and how the same struct is configured for v2 auth.
func ExampleConnection() {
	// Create a v1 auth connection
	c := &swift.Connection{
		// This should be your username
		UserName: "user",
		// This should be your api key
		ApiKey: "key",
		// This should be a v1 auth url, eg
		// Rackspace US        https://auth.api.rackspacecloud.com/v1.0
		// Rackspace UK        https://lon.auth.api.rackspacecloud.com/v1.0
		// Memset Memstore UK  https://auth.storage.memset.com/v1.0
		AuthUrl: "auth_url",
	}

	// Authenticate
	err := c.Authenticate()
	if err != nil {
		panic(err)
	}
	// List all the containers
	containers, err := c.ContainerNames(nil)
	// NOTE(review): err from ContainerNames is not checked here — fine
	// for an example, not a pattern to copy into production code.
	fmt.Println(containers)
	// etc...

	// ------ or alternatively create a v2 connection ------

	// Create a v2 auth connection
	c = &swift.Connection{
		// This is the sub user for the storage - eg "admin"
		UserName: "user",
		// This should be your api key
		ApiKey: "key",
		// This should be a version2 auth url, eg
		// Rackspace v2        https://identity.api.rackspacecloud.com/v2.0
		// Memset Memstore v2  https://auth.storage.memset.com/v2.0
		AuthUrl: "v2_auth_url",
		// Region to use - default is use first region if unset
		Region: "LON",
		// Name of the tenant - this is likely your username
		Tenant: "jim",
	}

	// as above...
}
|
||||
|
||||
// container is the container name used by the examples below.
var container string

// ExampleConnection_ObjectsWalk collects the name of every object in a
// container by letting ObjectsWalk page through the listing.
func ExampleConnection_ObjectsWalk() {
	c, rollback := makeConnection(nil)
	defer rollback()

	objects := make([]string, 0)
	// The callback fetches one page of names per call; ObjectsWalk
	// stops when the returned page is empty.
	err := c.ObjectsWalk(container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) {
		newObjects, err := c.ObjectNames(container, opts)
		if err == nil {
			objects = append(objects, newObjects...)
		}
		return newObjects, err
	})
	fmt.Println("Found all the objects", objects, err)
}
|
||||
|
||||
func ExampleConnection_VersionContainerCreate() {
|
||||
c, rollback := makeConnection(nil)
|
||||
defer rollback()
|
||||
|
||||
// Use the helper method to create the current and versions container.
|
||||
if err := c.VersionContainerCreate("cds", "cd-versions"); err != nil {
|
||||
fmt.Print(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// ExampleConnection_VersionEnable demonstrates wiring up versioning by
// hand: create the two containers, then link them with VersionEnable.
func ExampleConnection_VersionEnable() {
	c, rollback := makeConnection(nil)
	defer rollback()

	// Build the containers manually and enable them.
	if err := c.ContainerCreate("movie-versions", nil); err != nil {
		fmt.Print(err.Error())
	}
	if err := c.ContainerCreate("movies", nil); err != nil {
		fmt.Print(err.Error())
	}
	if err := c.VersionEnable("movies", "movie-versions"); err != nil {
		fmt.Print(err.Error())
	}

	// Access the primary container as usual with ObjectCreate(), ObjectPut(), etc.
	// etc...
}
|
||||
|
||||
func ExampleConnection_VersionDisable() {
|
||||
c, rollback := makeConnection(nil)
|
||||
defer rollback()
|
||||
|
||||
// Disable versioning on a container. Note that this does not delete the versioning container.
|
||||
c.VersionDisable("movies")
|
||||
}
|
445
vendor/github.com/ncw/swift/largeobjects.go
generated
vendored
Normal file
445
vendor/github.com/ncw/swift/largeobjects.go
generated
vendored
Normal file
|
@ -0,0 +1,445 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
gopath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NotLargeObject is returned if an operation is performed on an object which isn't large.
var NotLargeObject = errors.New("Not a large object")

// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded
// (used by withLORetry as the overall deadline).
var readAfterWriteTimeout = 15 * time.Second

// readAfterWriteWait defines the time to sleep between two retries;
// withLORetry doubles it after each attempt (exponential backoff).
var readAfterWriteWait = 200 * time.Millisecond
|
||||
|
||||
// largeObjectCreateFile represents an open static or dynamic large object
type largeObjectCreateFile struct {
	conn             *Connection
	container        string // container holding the manifest object
	objectName       string // name of the manifest object
	currentLength    int64  // total size, recomputed from segment sizes after each Write
	filePos          int64  // offset at which the next Write lands (moved by Seek)
	chunkSize        int64  // target segment size; defaults to 10 MiB in largeObjectCreate
	segmentContainer string // container holding the segment objects
	prefix           string // object-name prefix shared by all segments
	contentType      string
	checkHash        bool
	segments         []Object // known segments in sequence order
	headers          Headers
	minChunkSize     int64 // lower bound on chunkSize (see LargeObjectOpts.MinChunkSize)
}
|
||||
|
||||
// swiftSegmentPath derives a randomised segment prefix of the form
// "segments/xxx/yyyy…" from the object path: the hex encoding of the
// path bytes, 32 random bytes and a SHA-1 tail, split after three hex
// digits.
func swiftSegmentPath(path string) (string, error) {
	random := make([]byte, 32)
	if _, err := rand.Read(random); err != nil {
		return "", err
	}
	sum := hex.EncodeToString(sha1.New().Sum(append([]byte(path), random...)))
	segPath := "segments/" + sum[:3] + "/" + sum[3:]
	// Trim is equivalent to the original TrimLeft(TrimRight(...)) pair.
	return strings.Trim(segPath, "/"), nil
}
|
||||
|
||||
// getSegment returns the object name of segment partNumber under
// segmentPath; the zero-padded 16-digit suffix keeps lexical and
// numeric ordering identical.
func getSegment(segmentPath string, partNumber int) string {
	return segmentPath + fmt.Sprintf("/%016d", partNumber)
}
|
||||
|
||||
// parseFullPath splits a "container/prefix" manifest value at the first
// slash; the prefix is empty when no slash is present.
func parseFullPath(manifest string) (container string, prefix string) {
	if i := strings.Index(manifest, "/"); i >= 0 {
		return manifest[:i], manifest[i+1:]
	}
	return manifest, ""
}
|
||||
|
||||
// IsLargeObjectDLO reports whether the headers describe a dynamic
// large object (an X-Object-Manifest header is present).
func (headers Headers) IsLargeObjectDLO() bool {
	_, isDLO := headers["X-Object-Manifest"]
	return isDLO
}
|
||||
|
||||
// IsLargeObjectSLO reports whether the headers describe a static
// large object (an X-Static-Large-Object header is present).
func (headers Headers) IsLargeObjectSLO() bool {
	_, isSLO := headers["X-Static-Large-Object"]
	return isSLO
}
|
||||
|
||||
// IsLargeObject reports whether the headers describe a large object of
// either flavour (static or dynamic).
func (headers Headers) IsLargeObject() bool {
	return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO()
}
|
||||
|
||||
// getAllSegments dispatches to the DLO or SLO segment-listing routine
// based on the manifest headers, returning the segment container and
// the segments. Returns NotLargeObject if neither header is present.
func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) {
	if manifest, isDLO := headers["X-Object-Manifest"]; isDLO {
		// DLO: the header value is "segmentContainer/segmentPrefix".
		segmentContainer, segmentPath := parseFullPath(manifest)
		segments, err := c.getAllDLOSegments(segmentContainer, segmentPath)
		return segmentContainer, segments, err
	}
	if headers.IsLargeObjectSLO() {
		return c.getAllSLOSegments(container, path)
	}
	return "", nil, NotLargeObject
}
|
||||
|
||||
// LargeObjectOpts describes how a large object should be created
type LargeObjectOpts struct {
	Container        string  // Name of container to place object
	ObjectName       string  // Name of object
	Flags            int     // Creation flags: os.O_TRUNC and/or os.O_APPEND (see largeObjectCreate)
	CheckHash        bool    // If set Check the hash
	Hash             string  // If set use this hash to check
	ContentType      string  // Content-Type of the object
	Headers          Headers // Additional headers to upload the object with
	ChunkSize        int64   // Size of chunks of the object, defaults to 10MB if not set
	MinChunkSize     int64   // Minimum chunk size, automatically set for SLO's based on info
	SegmentContainer string  // Name of the container to place segments
	SegmentPrefix    string  // Prefix to use for the segments
	NoBuffer         bool    // Prevents using a bufio.Writer to write segments
}
|
||||
|
||||
// LargeObjectFile is an open large object: it can be written to and
// seeked within, reports its total size, and must be flushed or closed
// to commit the manifest.
type LargeObjectFile interface {
	io.Writer
	io.Seeker
	io.Closer
	Size() int64
	Flush() error
}
|
||||
|
||||
// largeObjectCreate creates a large object at opts.Container, opts.ObjectName.
|
||||
//
|
||||
// opts.Flags can have the following bits set
|
||||
// os.TRUNC - remove the contents of the large object if it exists
|
||||
// os.APPEND - write at the end of the large object
|
||||
func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
|
||||
var (
|
||||
segmentPath string
|
||||
segmentContainer string
|
||||
segments []Object
|
||||
currentLength int64
|
||||
err error
|
||||
)
|
||||
|
||||
if opts.SegmentPrefix != "" {
|
||||
segmentPath = opts.SegmentPrefix
|
||||
} else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil {
|
||||
if opts.Flags&os.O_TRUNC != 0 {
|
||||
c.LargeObjectDelete(opts.Container, opts.ObjectName)
|
||||
} else {
|
||||
currentLength = info.Bytes
|
||||
if headers.IsLargeObject() {
|
||||
segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(segments) > 0 {
|
||||
segmentPath = gopath.Dir(segments[0].Name)
|
||||
}
|
||||
} else {
|
||||
if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
segments = append(segments, info)
|
||||
}
|
||||
}
|
||||
} else if err != ObjectNotFound {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// segmentContainer is not empty when the manifest already existed
|
||||
if segmentContainer == "" {
|
||||
if opts.SegmentContainer != "" {
|
||||
segmentContainer = opts.SegmentContainer
|
||||
} else {
|
||||
segmentContainer = opts.Container + "_segments"
|
||||
}
|
||||
}
|
||||
|
||||
file := &largeObjectCreateFile{
|
||||
conn: c,
|
||||
checkHash: opts.CheckHash,
|
||||
container: opts.Container,
|
||||
objectName: opts.ObjectName,
|
||||
chunkSize: opts.ChunkSize,
|
||||
minChunkSize: opts.MinChunkSize,
|
||||
headers: opts.Headers,
|
||||
segmentContainer: segmentContainer,
|
||||
prefix: segmentPath,
|
||||
segments: segments,
|
||||
currentLength: currentLength,
|
||||
}
|
||||
|
||||
if file.chunkSize == 0 {
|
||||
file.chunkSize = 10 * 1024 * 1024
|
||||
}
|
||||
|
||||
if file.minChunkSize > file.chunkSize {
|
||||
file.chunkSize = file.minChunkSize
|
||||
}
|
||||
|
||||
if opts.Flags&os.O_APPEND != 0 {
|
||||
file.filePos = currentLength
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// LargeObjectDelete deletes the large object named by container, path
// along with all of its segments, using bulk delete when the server
// advertises support for it.
func (c *Connection) LargeObjectDelete(container string, objectName string) error {
	_, headers, err := c.Object(container, objectName)
	if err != nil {
		return err
	}

	// Collect {container, name} pairs: all segments first, the
	// manifest object last.
	var objects [][]string
	if headers.IsLargeObject() {
		segmentContainer, segments, err := c.getAllSegments(container, objectName, headers)
		if err != nil {
			return err
		}
		for _, obj := range segments {
			objects = append(objects, []string{segmentContainer, obj.Name})
		}
	}
	objects = append(objects, []string{container, objectName})

	info, err := c.cachedQueryInfo()
	if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
		filenames := make([]string, len(objects))
		for i, obj := range objects {
			filenames[i] = obj[0] + "/" + obj[1]
		}
		_, err = c.doBulkDelete(filenames)
		// Don't fail on ObjectNotFound because eventual consistency
		// makes this situation normal.
		if err != nil && err != Forbidden && err != ObjectNotFound {
			return err
		}
	} else {
		// No bulk-delete support: delete one object at a time.
		for _, obj := range objects {
			if err := c.ObjectDelete(obj[0], obj[1]); err != nil {
				return err
			}
		}
	}

	return nil
}
|
||||
|
||||
// LargeObjectGetSegments returns all the segments that compose an object
// If the object is a Dynamic Large Object (DLO), it just returns the objects
// that have the prefix as indicated by the manifest.
// If the object is a Static Large Object (SLO), it retrieves the JSON content
// of the manifest and return all the segments of it.
func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) {
	// Fetch the manifest headers, then let getAllSegments dispatch on
	// the DLO/SLO flavour.
	_, headers, err := c.Object(container, path)
	if err != nil {
		return "", nil, err
	}

	return c.getAllSegments(container, path, headers)
}
|
||||
|
||||
// Seek sets the offset for the next write operation
//
// whence follows the io.Seeker convention: 0 = from the start,
// 1 = from the current position, 2 = from the end of the object.
func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case 0:
		file.filePos = offset
	case 1:
		file.filePos += offset
	case 2:
		file.filePos = file.currentLength + offset
	default:
		return -1, fmt.Errorf("invalid value for whence")
	}
	// NOTE(review): file.filePos has already been updated by the time
	// this check runs, so a negative seek leaves the stored position
	// negative even though an error is returned — confirm callers
	// never reuse the file after this error.
	if file.filePos < 0 {
		return -1, fmt.Errorf("negative offset")
	}
	return file.filePos, nil
}
|
||||
|
||||
// Size returns the total length of the large object (the sum of all
// segment sizes, kept up to date by Write).
func (file *largeObjectCreateFile) Size() int64 {
	return file.currentLength
}
|
||||
|
||||
// withLORetry calls fn until the size it reports matches expectedSize
// (or the headers show the object is not a DLO), retrying with
// exponential backoff up to readAfterWriteTimeout. It papers over
// Swift's eventual consistency after segment uploads.
func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) {
	waitingTime := readAfterWriteWait
	endTimer := time.After(readAfterWriteTimeout)
	for {
		var headers Headers
		var sz int64
		if headers, sz, err = fn(); err == nil {
			// expectedSize == 0 is treated as "any non-zero size".
			if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz {
				return
			}
		} else {
			return
		}
		select {
		case <-endTimer:
			err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz)
			return
		case <-time.After(waitingTime):
			// Exponential backoff between polls.
			waitingTime *= 2
		}
	}
}
|
||||
|
||||
// waitForSegmentsToShowUp polls the object metadata until its reported
// byte count reaches expectedSize, tolerating Swift's eventual
// consistency (see withLORetry).
func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) {
	err = withLORetry(expectedSize, func() (Headers, int64, error) {
		var info Object
		var headers Headers
		info, headers, err = c.objectBase(container, objectName)
		if err != nil {
			return headers, 0, err
		}
		return headers, info.Bytes, nil
	})
	return
}
|
||||
|
||||
// Write satisfies the io.Writer interface
//
// The buffer is spread across segment objects starting at the current
// file position, rewriting existing segments where they overlap.
func (file *largeObjectCreateFile) Write(buf []byte) (int, error) {
	var sz int64
	var relativeFilePos int
	// Find the segment containing file.filePos and the offset within it.
	// The last segment also absorbs positions within minChunkSize of its
	// start so short tails get merged rather than spawning tiny segments.
	writeSegmentIdx := 0
	for i, obj := range file.segments {
		if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) {
			relativeFilePos = int(file.filePos - sz)
			break
		}
		writeSegmentIdx++
		sz += obj.Bytes
	}
	sizeToWrite := len(buf)
	// Write (or rewrite) one segment at a time until buf is consumed.
	for offset := 0; offset < sizeToWrite; {
		newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos)
		if err != nil {
			return 0, err
		}
		if writeSegmentIdx < len(file.segments) {
			file.segments[writeSegmentIdx] = *newSegment
		} else {
			file.segments = append(file.segments, *newSegment)
		}
		offset += n
		writeSegmentIdx++
		// Only the first touched segment can start mid-way through.
		relativeFilePos = 0
	}
	file.filePos += int64(sizeToWrite)
	// Recompute the total object length from the segment sizes.
	file.currentLength = 0
	for _, obj := range file.segments {
		file.currentLength += obj.Bytes
	}
	return sizeToWrite, nil
}
|
||||
|
||||
// writeSegment uploads one segment object, consuming up to chunkSize
// bytes of buf. When overwriting an existing segment mid-way, the
// untouched head and tail of the old segment are streamed back in via
// ranged reads so the new upload preserves them. It returns the new
// segment's metadata and the number of buf bytes consumed.
func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) {
	var (
		readers         []io.Reader
		existingSegment *Object
		segmentSize     int
	)
	segmentName := getSegment(file.prefix, writeSegmentIdx+1)
	sizeToRead := int(file.chunkSize)
	if writeSegmentIdx < len(file.segments) {
		existingSegment = &file.segments[writeSegmentIdx]
		// Keep the size of every segment except the last one fixed.
		if writeSegmentIdx != len(file.segments)-1 {
			sizeToRead = int(existingSegment.Bytes)
		}
		if relativeFilePos > 0 {
			// Preserve the head of the existing segment (bytes before
			// the write position) via a ranged read.
			headers := make(Headers)
			headers["Range"] = "bytes=0-" + strconv.FormatInt(int64(relativeFilePos-1), 10)
			existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
			if err != nil {
				return nil, 0, err
			}
			defer existingSegmentReader.Close()
			sizeToRead -= relativeFilePos
			segmentSize += relativeFilePos
			readers = []io.Reader{existingSegmentReader}
		}
	}
	if sizeToRead > len(buf) {
		sizeToRead = len(buf)
	}
	segmentSize += sizeToRead
	readers = append(readers, bytes.NewReader(buf[:sizeToRead]))
	if existingSegment != nil && segmentSize < int(existingSegment.Bytes) {
		// Preserve the tail of the existing segment (bytes after the
		// newly written data) via a second ranged read.
		headers := make(Headers)
		headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-"
		tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
		if err != nil {
			return nil, 0, err
		}
		defer tailSegmentReader.Close()
		segmentSize = int(existingSegment.Bytes)
		readers = append(readers, tailSegmentReader)
	}
	// Upload head + new data + tail as one stream.
	segmentReader := io.MultiReader(readers...)
	headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil)
	if err != nil {
		return nil, 0, err
	}
	return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil
}
|
||||
|
||||
func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile {
|
||||
if !opts.NoBuffer {
|
||||
return &bufferedLargeObjectFile{
|
||||
LargeObjectFile: lo,
|
||||
bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)),
|
||||
}
|
||||
}
|
||||
return lo
|
||||
}
|
||||
|
||||
type bufferedLargeObjectFile struct {
|
||||
LargeObjectFile
|
||||
bw *bufio.Writer
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Close() error {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return blo.LargeObjectFile.Close()
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) {
|
||||
return blo.bw.Write(p)
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return blo.LargeObjectFile.Seek(offset, whence)
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Size() int64 {
|
||||
return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered())
|
||||
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Flush() error {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return blo.LargeObjectFile.Flush()
|
||||
}
|
213
vendor/github.com/ncw/swift/meta_test.go
generated
vendored
Normal file
213
vendor/github.com/ncw/swift/meta_test.go
generated
vendored
Normal file
|
@ -0,0 +1,213 @@
|
|||
// Tests for swift metadata
|
||||
package swift
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestHeadersToMetadata is an unimplemented placeholder.
func TestHeadersToMetadata(t *testing.T) {
}

// TestHeadersToAccountMetadata is an unimplemented placeholder.
func TestHeadersToAccountMetadata(t *testing.T) {
}

// TestHeadersToContainerMetadata is an unimplemented placeholder.
func TestHeadersToContainerMetadata(t *testing.T) {
}

// TestHeadersToObjectMetadata is an unimplemented placeholder.
func TestHeadersToObjectMetadata(t *testing.T) {
}

// TestMetadataToHeaders is an unimplemented placeholder.
func TestMetadataToHeaders(t *testing.T) {
}

// TestMetadataToAccountHeaders is an unimplemented placeholder.
func TestMetadataToAccountHeaders(t *testing.T) {
}

// TestMetadataToContainerHeaders is an unimplemented placeholder.
func TestMetadataToContainerHeaders(t *testing.T) {
}

// TestMetadataToObjectHeaders is an unimplemented placeholder.
func TestMetadataToObjectHeaders(t *testing.T) {
}
|
||||
|
||||
func TestNsToFloatString(t *testing.T) {
|
||||
for _, d := range []struct {
|
||||
ns int64
|
||||
fs string
|
||||
}{
|
||||
{0, "0"},
|
||||
{1, "0.000000001"},
|
||||
{1000, "0.000001"},
|
||||
{1000000, "0.001"},
|
||||
{100000000, "0.1"},
|
||||
{1000000000, "1"},
|
||||
{10000000000, "10"},
|
||||
{12345678912, "12.345678912"},
|
||||
{12345678910, "12.34567891"},
|
||||
{12345678900, "12.3456789"},
|
||||
{12345678000, "12.345678"},
|
||||
{12345670000, "12.34567"},
|
||||
{12345600000, "12.3456"},
|
||||
{12345000000, "12.345"},
|
||||
{12340000000, "12.34"},
|
||||
{12300000000, "12.3"},
|
||||
{12000000000, "12"},
|
||||
{10000000000, "10"},
|
||||
{1347717491123123123, "1347717491.123123123"},
|
||||
} {
|
||||
if nsToFloatString(d.ns) != d.fs {
|
||||
t.Error("Failed", d.ns, "!=", d.fs)
|
||||
}
|
||||
if d.ns > 0 && nsToFloatString(-d.ns) != "-"+d.fs {
|
||||
t.Error("Failed on negative", d.ns, "!=", d.fs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestFloatStringToNs checks the decimal-string -> nanoseconds
// parser: truncation of sub-nanosecond digits, negative values, and
// rejection of malformed input.
func TestFloatStringToNs(t *testing.T) {
	for _, d := range []struct {
		ns int64
		fs string
	}{
		{0, "0"},
		{0, "0."},
		{0, ".0"},
		{0, "0.0"},
		{0, "0.0000000001"}, // digits beyond 9 decimal places are dropped
		{1, "0.000000001"},
		{1000, "0.000001"},
		{1000000, "0.001"},
		{100000000, "0.1"},
		{100000000, "0.10"},
		{100000000, "0.1000000001"},
		{1000000000, "1"},
		{1000000000, "1."},
		{1000000000, "1.0"},
		{10000000000, "10"},
		{12345678912, "12.345678912"},
		{12345678912, "12.3456789129"},
		{12345678912, "12.34567891299"},
		{12345678910, "12.34567891"},
		{12345678900, "12.3456789"},
		{12345678000, "12.345678"},
		{12345670000, "12.34567"},
		{12345600000, "12.3456"},
		{12345000000, "12.345"},
		{12340000000, "12.34"},
		{12300000000, "12.3"},
		{12000000000, "12"},
		{10000000000, "10"},
		// This is a typical value which has more bits in than a float64
		{1347717491123123123, "1347717491.123123123"},
	} {
		ns, err := floatStringToNs(d.fs)
		if err != nil {
			t.Error("Failed conversion", err)
		}
		if ns != d.ns {
			t.Error("Failed", d.fs, "!=", d.ns, "was", ns)
		}
		// Each positive case should also parse with a leading minus.
		if d.ns > 0 {
			ns, err := floatStringToNs("-" + d.fs)
			if err != nil {
				t.Error("Failed conversion", err)
			}
			if ns != -d.ns {
				t.Error("Failed on negative", -d.ns, "!=", "-"+d.fs)
			}
		}
	}

	// These are expected to produce errors
	for _, fs := range []string{
		"",
		" 1",
		"- 1",
		"- 1",
		"1.-1",
		"1.0.0",
		"1x0",
	} {
		ns, err := floatStringToNs(fs)
		if err == nil {
			t.Error("Didn't produce expected error", fs, ns)
		}
	}

}
|
||||
|
||||
// TestGetModTime checks parsing of the "mtime" metadata value into a
// time.Time: fractional seconds, truncation beyond nanosecond
// precision, and error handling for missing or malformed values.
func TestGetModTime(t *testing.T) {
	for _, d := range []struct {
		ns string
		t  string
	}{
		{"1354040105", "2012-11-27T18:15:05Z"},
		{"1354040105.", "2012-11-27T18:15:05Z"},
		{"1354040105.0", "2012-11-27T18:15:05Z"},
		{"1354040105.000000000000", "2012-11-27T18:15:05Z"},
		{"1354040105.123", "2012-11-27T18:15:05.123Z"},
		{"1354040105.123456", "2012-11-27T18:15:05.123456Z"},
		{"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"},
		// Digits beyond nanosecond precision are dropped.
		{"1354040105.123456789123", "2012-11-27T18:15:05.123456789Z"},
		{"0", "1970-01-01T00:00:00.000000000Z"},
	} {
		expected, err := time.Parse(time.RFC3339, d.t)
		if err != nil {
			t.Error("Bad test", err)
		}
		m := Metadata{"mtime": d.ns}
		actual, err := m.GetModTime()
		if err != nil {
			t.Error("Parse error", err)
		}
		if !actual.Equal(expected) {
			t.Error("Expecting", expected, expected.UnixNano(), "got", actual, actual.UnixNano())
		}
	}
	// A missing ("EMPTY" sentinel) or malformed mtime must return an
	// error and the zero time.
	for _, ns := range []string{
		"EMPTY",
		"",
		" 1",
		"- 1",
		"- 1",
		"1.-1",
		"1.0.0",
		"1x0",
	} {
		m := Metadata{}
		if ns != "EMPTY" {
			m["mtime"] = ns
		}
		actual, err := m.GetModTime()
		if err == nil {
			t.Error("Expected error not produced")
		}
		if !actual.IsZero() {
			t.Error("Expected output to be zero")
		}
	}
}
|
||||
|
||||
func TestSetModTime(t *testing.T) {
|
||||
for _, d := range []struct {
|
||||
ns string
|
||||
t string
|
||||
}{
|
||||
{"1354040105", "2012-11-27T18:15:05Z"},
|
||||
{"1354040105", "2012-11-27T18:15:05.000000Z"},
|
||||
{"1354040105.123", "2012-11-27T18:15:05.123Z"},
|
||||
{"1354040105.123456", "2012-11-27T18:15:05.123456Z"},
|
||||
{"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"},
|
||||
{"0", "1970-01-01T00:00:00.000000000Z"},
|
||||
} {
|
||||
time, err := time.Parse(time.RFC3339, d.t)
|
||||
if err != nil {
|
||||
t.Error("Bad test", err)
|
||||
}
|
||||
m := Metadata{}
|
||||
m.SetModTime(time)
|
||||
if m["mtime"] != d.ns {
|
||||
t.Error("mtime wrong", m, "should be", d.ns)
|
||||
}
|
||||
}
|
||||
}
|
83
vendor/github.com/ncw/swift/rs/rs.go
generated
vendored
Normal file
83
vendor/github.com/ncw/swift/rs/rs.go
generated
vendored
Normal file
|
@ -0,0 +1,83 @@
|
|||
package rs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
|
||||
// RsConnection is a RackSpace specific wrapper to the core swift library which
// exposes the RackSpace CDN commands via the CDN Management URL interface.
type RsConnection struct {
	swift.Connection
	// cdnUrl caches the CDN management endpoint resolved from the
	// auth response; populated lazily by manage().
	cdnUrl string
}
|
||||
|
||||
// manage is similar to the swift storage method, but uses the CDN Management URL for CDN specific calls.
//
// It installs an OnReAuth hook that (re)resolves the CDN management
// URL from the authenticator, and runs that hook eagerly when the
// connection is already authenticated so the first call has a URL.
func (c *RsConnection) manage(p swift.RequestOpts) (resp *http.Response, headers swift.Headers, err error) {
	p.OnReAuth = func() (string, error) {
		if c.cdnUrl == "" {
			c.cdnUrl = c.Auth.CdnUrl()
		}
		if c.cdnUrl == "" {
			// The auth response carried no CDN management endpoint.
			return "", errors.New("The X-CDN-Management-Url does not exist on the authenticated platform")
		}
		return c.cdnUrl, nil
	}
	if c.Authenticated() {
		// Resolve the CDN URL up front; Call would otherwise only do
		// so after a re-authentication.
		_, err = p.OnReAuth()
		if err != nil {
			return nil, nil, err
		}
	}
	return c.Connection.Call(c.cdnUrl, p)
}
|
||||
|
||||
// ContainerCDNEnable enables a container for public CDN usage.
|
||||
//
|
||||
// Change the default TTL of 259200 seconds (72 hours) by passing in an integer value.
|
||||
//
|
||||
// This method can be called again to change the TTL.
|
||||
func (c *RsConnection) ContainerCDNEnable(container string, ttl int) (swift.Headers, error) {
|
||||
h := swift.Headers{"X-CDN-Enabled": "true"}
|
||||
if ttl > 0 {
|
||||
h["X-TTL"] = strconv.Itoa(ttl)
|
||||
}
|
||||
|
||||
_, headers, err := c.manage(swift.RequestOpts{
|
||||
Container: container,
|
||||
Operation: "PUT",
|
||||
ErrorMap: swift.ContainerErrorMap,
|
||||
NoResponse: true,
|
||||
Headers: h,
|
||||
})
|
||||
return headers, err
|
||||
}
|
||||
|
||||
// ContainerCDNDisable disables CDN access to a container.
|
||||
func (c *RsConnection) ContainerCDNDisable(container string) error {
|
||||
h := swift.Headers{"X-CDN-Enabled": "false"}
|
||||
|
||||
_, _, err := c.manage(swift.RequestOpts{
|
||||
Container: container,
|
||||
Operation: "PUT",
|
||||
ErrorMap: swift.ContainerErrorMap,
|
||||
NoResponse: true,
|
||||
Headers: h,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// ContainerCDNMeta returns the CDN metadata for a container.
|
||||
func (c *RsConnection) ContainerCDNMeta(container string) (swift.Headers, error) {
|
||||
_, headers, err := c.manage(swift.RequestOpts{
|
||||
Container: container,
|
||||
Operation: "HEAD",
|
||||
ErrorMap: swift.ContainerErrorMap,
|
||||
NoResponse: true,
|
||||
Headers: swift.Headers{},
|
||||
})
|
||||
return headers, err
|
||||
}
|
96
vendor/github.com/ncw/swift/rs/rs_test.go
generated
vendored
Normal file
96
vendor/github.com/ncw/swift/rs/rs_test.go
generated
vendored
Normal file
|
@ -0,0 +1,96 @@
|
|||
// See swift_test.go for requirements to run this test.
|
||||
package rs_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/swift/rs"
|
||||
)
|
||||
|
||||
var (
	// c is the shared connection; these tests run in file order and
	// TestAuthenticate must populate it first.
	c rs.RsConnection
)

const (
	CONTAINER    = "GoSwiftUnitTest"
	OBJECT       = "test_object"
	CONTENTS     = "12345"
	CONTENT_SIZE = int64(len(CONTENTS))
	// CONTENT_MD5 is the MD5 of CONTENTS.
	CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b"
)
|
||||
|
||||
// Test functions are run in order - this one must be first!
//
// TestAuthenticate reads credentials from the SWIFT_API_USER,
// SWIFT_API_KEY and SWIFT_AUTH_URL environment variables and
// authenticates the shared connection used by the later tests.
func TestAuthenticate(t *testing.T) {
	UserName := os.Getenv("SWIFT_API_USER")
	ApiKey := os.Getenv("SWIFT_API_KEY")
	AuthUrl := os.Getenv("SWIFT_AUTH_URL")
	if UserName == "" || ApiKey == "" || AuthUrl == "" {
		t.Fatal("SWIFT_API_USER, SWIFT_API_KEY and SWIFT_AUTH_URL not all set")
	}
	c = rs.RsConnection{}
	c.UserName = UserName
	c.ApiKey = ApiKey
	c.AuthUrl = AuthUrl
	err := c.Authenticate()
	if err != nil {
		t.Fatal("Auth failed", err)
	}
	if !c.Authenticated() {
		t.Fatal("Not authenticated")
	}
}
|
||||
|
||||
// Setup
|
||||
func TestContainerCreate(t *testing.T) {
|
||||
err := c.ContainerCreate(CONTAINER, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCDNEnable(t *testing.T) {
|
||||
headers, err := c.ContainerCDNEnable(CONTAINER, 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if _, ok := headers["X-Cdn-Uri"]; !ok {
|
||||
t.Error("Failed to enable CDN for container")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOnReAuth(t *testing.T) {
|
||||
c2 := rs.RsConnection{}
|
||||
c2.UserName = c.UserName
|
||||
c2.ApiKey = c.ApiKey
|
||||
c2.AuthUrl = c.AuthUrl
|
||||
_, err := c2.ContainerCDNEnable(CONTAINER, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to reauthenticate: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCDNMeta(t *testing.T) {
|
||||
headers, err := c.ContainerCDNMeta(CONTAINER)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if _, ok := headers["X-Cdn-Uri"]; !ok {
|
||||
t.Error("CDN is not enabled")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCDNDisable(t *testing.T) {
|
||||
err := c.ContainerCDNDisable(CONTAINER) // files stick in CDN until TTL expires
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Teardown
|
||||
func TestContainerDelete(t *testing.T) {
|
||||
err := c.ContainerDelete(CONTAINER)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
168
vendor/github.com/ncw/swift/slo.go
generated
vendored
Normal file
168
vendor/github.com/ncw/swift/slo.go
generated
vendored
Normal file
|
@ -0,0 +1,168 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
)
|
||||
|
||||
// StaticLargeObjectCreateFile represents an open static large object
type StaticLargeObjectCreateFile struct {
	largeObjectCreateFile
}

// SLONotSupported is returned when the server does not advertise the
// "slo" middleware in its /info document.
var SLONotSupported = errors.New("SLO not supported")

// swiftSegment describes one entry of a large-object manifest, both
// for upload and retrieval.
type swiftSegment struct {
	Path string `json:"path,omitempty"`
	Etag string `json:"etag,omitempty"`
	Size int64  `json:"size_bytes,omitempty"`
	// When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes`
	// but when querying the JSON content of a manifest with the `multipart-manifest=get`
	// parameter, Swift names those attributes `name`, `hash` and `bytes`.
	// We use all the different attributes names in this structure to be able to use
	// the same structure for both uploading and retrieving.
	Name         string `json:"name,omitempty"`
	Hash         string `json:"hash,omitempty"`
	Bytes        int64  `json:"bytes,omitempty"`
	ContentType  string `json:"content_type,omitempty"`
	LastModified string `json:"last_modified,omitempty"`
}
|
||||
|
||||
// StaticLargeObjectCreateFile creates a static large object returning
// an object which satisfies io.Writer, io.Seeker, io.Closer and
// io.ReaderFrom. The flags are as passed to the largeObjectCreate
// method.
func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
	info, err := c.cachedQueryInfo()
	if err != nil || !info.SupportsSLO() {
		return nil, SLONotSupported
	}
	// Honour the server-side minimum segment size if it exceeds the
	// caller's requested minimum.
	realMinChunkSize := info.SLOMinSegmentSize()
	if realMinChunkSize > opts.MinChunkSize {
		opts.MinChunkSize = realMinChunkSize
	}
	lo, err := c.largeObjectCreate(opts)
	if err != nil {
		return nil, err
	}
	return withBuffer(opts, &StaticLargeObjectCreateFile{
		largeObjectCreateFile: *lo,
	}), nil
}
|
||||
|
||||
// StaticLargeObjectCreate creates or truncates an existing static
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling StaticLargeObjectCreateFile
func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
	// Truncate-or-create semantics, mirroring os.Create.
	opts.Flags = os.O_TRUNC | os.O_CREATE
	return c.StaticLargeObjectCreateFile(opts)
}
|
||||
|
||||
// StaticLargeObjectDelete deletes a static large object and all of its segments.
|
||||
func (c *Connection) StaticLargeObjectDelete(container string, path string) error {
|
||||
info, err := c.cachedQueryInfo()
|
||||
if err != nil || !info.SupportsSLO() {
|
||||
return SLONotSupported
|
||||
}
|
||||
return c.LargeObjectDelete(container, path)
|
||||
}
|
||||
|
||||
// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName
func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
	swiftInfo, err := c.cachedQueryInfo()
	if err != nil || !swiftInfo.SupportsSLO() {
		return SLONotSupported
	}
	info, headers, err := c.Object(srcContainer, srcObjectName)
	if err != nil {
		return err
	}

	container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers)
	if err != nil {
		return err
	}

	// Write a new manifest at the destination referring to the same
	// segment objects; the segment data itself is not copied.
	if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments); err != nil {
		return err
	}

	// Remove the source object (the manifest) once the destination
	// manifest is in place.
	if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// createSLOManifest creates a static large object manifest
|
||||
func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object) error {
|
||||
sloSegments := make([]swiftSegment, len(segments))
|
||||
for i, segment := range segments {
|
||||
sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name)
|
||||
sloSegments[i].Etag = segment.Hash
|
||||
sloSegments[i].Size = segment.Bytes
|
||||
}
|
||||
|
||||
content, err := json.Marshal(sloSegments)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
values := url.Values{}
|
||||
values.Set("multipart-manifest", "put")
|
||||
if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, nil, values); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *StaticLargeObjectCreateFile) Close() error {
|
||||
return file.Flush()
|
||||
}
|
||||
|
||||
func (file *StaticLargeObjectCreateFile) Flush() error {
|
||||
if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments); err != nil {
|
||||
return err
|
||||
}
|
||||
return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
|
||||
}
|
||||
|
||||
func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) {
|
||||
var (
|
||||
segmentList []swiftSegment
|
||||
segments []Object
|
||||
segPath string
|
||||
segmentContainer string
|
||||
)
|
||||
|
||||
values := url.Values{}
|
||||
values.Set("multipart-manifest", "get")
|
||||
|
||||
file, _, err := c.objectOpen(container, path, true, nil, values)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
json.Unmarshal(content, &segmentList)
|
||||
for _, segment := range segmentList {
|
||||
segmentContainer, segPath = parseFullPath(segment.Name[1:])
|
||||
segments = append(segments, Object{
|
||||
Name: segPath,
|
||||
Bytes: segment.Bytes,
|
||||
Hash: segment.Hash,
|
||||
})
|
||||
}
|
||||
|
||||
return segmentContainer, segments, nil
|
||||
}
|
277
vendor/github.com/ncw/swift/swift.go
generated
vendored
277
vendor/github.com/ncw/swift/swift.go
generated
vendored
|
@ -33,6 +33,17 @@ const (
|
|||
allObjectsChanLimit = 1000 // ...when fetching to a channel
|
||||
)
|
||||
|
||||
// ObjectType is the type of the swift object, regular, static large,
// or dynamic large.
type ObjectType int

// Values that ObjectType can take
const (
	// RegularObjectType is a plain object (the zero value).
	RegularObjectType ObjectType = iota
	// StaticLargeObjectType is a static large object (SLO manifest).
	StaticLargeObjectType
	// DynamicLargeObjectType is a dynamic large object (DLO manifest).
	DynamicLargeObjectType
)
|
||||
|
||||
// Connection holds the details of the connection to the swift server.
|
||||
//
|
||||
// You need to provide UserName, ApiKey and AuthUrl when you create a
|
||||
|
@ -108,6 +119,8 @@ type Connection struct {
|
|||
client *http.Client
|
||||
Auth Authenticator `json:"-" xml:"-"` // the current authenticator
|
||||
authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth
|
||||
// swiftInfo is filled after QueryInfo is called
|
||||
swiftInfo SwiftInfo
|
||||
}
|
||||
|
||||
// Error - all errors generated by this package are of this type. Other error
|
||||
|
@ -406,6 +419,24 @@ func (c *Connection) authenticated() bool {
|
|||
// the enabled middlewares and their configuration
|
||||
type SwiftInfo map[string]interface{}
|
||||
|
||||
func (i SwiftInfo) SupportsBulkDelete() bool {
|
||||
_, val := i["bulk_delete"]
|
||||
return val
|
||||
}
|
||||
|
||||
func (i SwiftInfo) SupportsSLO() bool {
|
||||
_, val := i["slo"]
|
||||
return val
|
||||
}
|
||||
|
||||
func (i SwiftInfo) SLOMinSegmentSize() int64 {
|
||||
if slo, ok := i["slo"].(map[string]interface{}); ok {
|
||||
val, _ := slo["min_segment_size"].(float64)
|
||||
return int64(val)
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
// Discover Swift configuration by doing a request against /info
|
||||
func (c *Connection) QueryInfo() (infos SwiftInfo, err error) {
|
||||
infoUrl, err := url.Parse(c.StorageUrl)
|
||||
|
@ -413,14 +444,36 @@ func (c *Connection) QueryInfo() (infos SwiftInfo, err error) {
|
|||
return nil, err
|
||||
}
|
||||
infoUrl.Path = path.Join(infoUrl.Path, "..", "..", "info")
|
||||
resp, err := http.Get(infoUrl.String())
|
||||
resp, err := c.client.Get(infoUrl.String())
|
||||
if err == nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("Invalid status code for info request: %d", resp.StatusCode)
|
||||
}
|
||||
err = readJson(resp, &infos)
|
||||
if err == nil {
|
||||
c.authLock.Lock()
|
||||
c.swiftInfo = infos
|
||||
c.authLock.Unlock()
|
||||
}
|
||||
return infos, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (c *Connection) cachedQueryInfo() (infos SwiftInfo, err error) {
|
||||
c.authLock.Lock()
|
||||
infos = c.swiftInfo
|
||||
c.authLock.Unlock()
|
||||
if infos == nil {
|
||||
infos, err = c.QueryInfo()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
// RequestOpts contains parameters for Connection.storage.
|
||||
type RequestOpts struct {
|
||||
Container string
|
||||
|
@ -796,14 +849,15 @@ func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string,
|
|||
|
||||
// Object contains information about an object
|
||||
type Object struct {
|
||||
Name string `json:"name"` // object name
|
||||
ContentType string `json:"content_type"` // eg application/directory
|
||||
Bytes int64 `json:"bytes"` // size in bytes
|
||||
ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server
|
||||
LastModified time.Time // Last modified time converted to a time.Time
|
||||
Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e"
|
||||
PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist
|
||||
SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories"
|
||||
Name string `json:"name"` // object name
|
||||
ContentType string `json:"content_type"` // eg application/directory
|
||||
Bytes int64 `json:"bytes"` // size in bytes
|
||||
ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server
|
||||
LastModified time.Time // Last modified time converted to a time.Time
|
||||
Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e"
|
||||
PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist
|
||||
SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories"
|
||||
ObjectType ObjectType // type of this object
|
||||
}
|
||||
|
||||
// Objects returns a slice of Object with information about each
|
||||
|
@ -1215,7 +1269,7 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash
|
|||
}
|
||||
// Run the PUT in the background piping it data
|
||||
go func() {
|
||||
file.resp, file.headers, file.err = c.storage(RequestOpts{
|
||||
opts := RequestOpts{
|
||||
Container: container,
|
||||
ObjectName: objectName,
|
||||
Operation: "PUT",
|
||||
|
@ -1223,7 +1277,8 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash
|
|||
Body: pipeReader,
|
||||
NoResponse: true,
|
||||
ErrorMap: objectErrorMap,
|
||||
})
|
||||
}
|
||||
file.resp, file.headers, file.err = c.storage(opts)
|
||||
// Signal finished
|
||||
pipeReader.Close()
|
||||
close(file.done)
|
||||
|
@ -1231,6 +1286,37 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash
|
|||
return
|
||||
}
|
||||
|
||||
// objectPut is the internal implementation of ObjectPut which
// additionally accepts URL parameters (used e.g. for
// multipart-manifest=put). If checkHash is true, the upload is hashed
// locally and ObjectCorrupted is returned when the server's Etag does
// not match.
func (c *Connection) objectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers, parameters url.Values) (headers Headers, err error) {
	extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h)
	hash := md5.New()
	var body io.Reader = contents
	if checkHash {
		// Tee the upload stream through the hasher so the MD5 can be
		// compared with the returned Etag afterwards.
		body = io.TeeReader(contents, hash)
	}
	_, headers, err = c.storage(RequestOpts{
		Container:  container,
		ObjectName: objectName,
		Operation:  "PUT",
		Headers:    extraHeaders,
		Body:       body,
		NoResponse: true,
		ErrorMap:   objectErrorMap,
		Parameters: parameters,
	})
	if err != nil {
		return
	}
	if checkHash {
		receivedMd5 := strings.ToLower(headers["Etag"])
		calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil))
		if receivedMd5 != calculatedMd5 {
			err = ObjectCorrupted
			return
		}
	}
	return
}
|
||||
|
||||
// ObjectPut creates or updates the path in the container from
|
||||
// contents. contents should be an open io.Reader which will have all
|
||||
// its contents read.
|
||||
|
@ -1253,33 +1339,7 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash
|
|||
// If contentType is set it will be used, otherwise one will be
|
||||
// guessed from objectName using mime.TypeByExtension
|
||||
func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) {
|
||||
extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h)
|
||||
hash := md5.New()
|
||||
var body io.Reader = contents
|
||||
if checkHash {
|
||||
body = io.TeeReader(contents, hash)
|
||||
}
|
||||
_, headers, err = c.storage(RequestOpts{
|
||||
Container: container,
|
||||
ObjectName: objectName,
|
||||
Operation: "PUT",
|
||||
Headers: extraHeaders,
|
||||
Body: body,
|
||||
NoResponse: true,
|
||||
ErrorMap: objectErrorMap,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if checkHash {
|
||||
receivedMd5 := strings.ToLower(headers["Etag"])
|
||||
calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil))
|
||||
if receivedMd5 != calculatedMd5 {
|
||||
err = ObjectCorrupted
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
return c.objectPut(container, objectName, contents, checkHash, Hash, contentType, h, nil)
|
||||
}
|
||||
|
||||
// ObjectPutBytes creates an object from a []byte in a container.
|
||||
|
@ -1287,7 +1347,8 @@ func (c *Connection) ObjectPut(container string, objectName string, contents io.
|
|||
// This is a simplified interface which checks the MD5.
|
||||
func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) {
|
||||
buf := bytes.NewBuffer(contents)
|
||||
_, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil)
|
||||
h := Headers{"Content-Length": strconv.Itoa(len(contents))}
|
||||
_, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1296,7 +1357,8 @@ func (c *Connection) ObjectPutBytes(container string, objectName string, content
|
|||
// This is a simplified interface which checks the MD5
|
||||
func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) {
|
||||
buf := strings.NewReader(contents)
|
||||
_, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil)
|
||||
h := Headers{"Content-Length": strconv.Itoa(len(contents))}
|
||||
_, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1441,6 +1503,57 @@ func (file *ObjectOpenFile) Close() (err error) {
|
|||
var _ io.ReadCloser = &ObjectOpenFile{}
|
||||
var _ io.Seeker = &ObjectOpenFile{}
|
||||
|
||||
// objectOpenBase performs a single GET of the object and wraps the
// response body in an ObjectOpenFile. MD5 checking is disabled for
// large objects (manifest headers present), whose Etag does not cover
// the concatenated content.
func (c *Connection) objectOpenBase(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) {
	var resp *http.Response
	opts := RequestOpts{
		Container:  container,
		ObjectName: objectName,
		Operation:  "GET",
		ErrorMap:   objectErrorMap,
		Headers:    h,
		Parameters: parameters,
	}
	resp, headers, err = c.storage(opts)
	if err != nil {
		return
	}
	// Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set
	if checkHash && headers.IsLargeObject() {
		// log.Printf("swift: turning off md5 checking on object with manifest %v", objectName)
		checkHash = false
	}
	file = &ObjectOpenFile{
		connection: c,
		container:  container,
		objectName: objectName,
		headers:    h,
		resp:       resp,
		checkHash:  checkHash,
		body:       resp.Body,
	}
	if checkHash {
		// Hash the body as it is read so Close can verify the Etag.
		file.hash = md5.New()
		file.body = io.TeeReader(resp.Body, file.hash)
	}
	// Read Content-Length
	if resp.Header.Get("Content-Length") != "" {
		file.length, err = getInt64FromHeader(resp, "Content-Length")
		file.lengthOk = (err == nil)
	}
	return
}
|
||||
|
||||
func (c *Connection) objectOpen(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) {
|
||||
err = withLORetry(0, func() (Headers, int64, error) {
|
||||
file, headers, err = c.objectOpenBase(container, objectName, checkHash, h, parameters)
|
||||
if err != nil {
|
||||
return headers, 0, err
|
||||
}
|
||||
return headers, file.length, nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// ObjectOpen returns an ObjectOpenFile for reading the contents of
|
||||
// the object. This satisfies the io.ReadCloser and the io.Seeker
|
||||
// interfaces.
|
||||
|
@ -1465,41 +1578,7 @@ var _ io.Seeker = &ObjectOpenFile{}
|
|||
//
|
||||
// headers["Content-Type"] will give the content type if desired.
|
||||
func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) {
|
||||
var resp *http.Response
|
||||
resp, headers, err = c.storage(RequestOpts{
|
||||
Container: container,
|
||||
ObjectName: objectName,
|
||||
Operation: "GET",
|
||||
ErrorMap: objectErrorMap,
|
||||
Headers: h,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set
|
||||
if checkHash && (headers["X-Object-Manifest"] != "" || headers["X-Static-Large-Object"] != "") {
|
||||
// log.Printf("swift: turning off md5 checking on object with manifest %v", objectName)
|
||||
checkHash = false
|
||||
}
|
||||
file = &ObjectOpenFile{
|
||||
connection: c,
|
||||
container: container,
|
||||
objectName: objectName,
|
||||
headers: h,
|
||||
resp: resp,
|
||||
checkHash: checkHash,
|
||||
body: resp.Body,
|
||||
}
|
||||
if checkHash {
|
||||
file.hash = md5.New()
|
||||
file.body = io.TeeReader(resp.Body, file.hash)
|
||||
}
|
||||
// Read Content-Length
|
||||
if resp.Header.Get("Content-Length") != "" {
|
||||
file.length, err = getInt64FromHeader(resp, "Content-Length")
|
||||
file.lengthOk = (err == nil)
|
||||
}
|
||||
return
|
||||
return c.objectOpen(container, objectName, checkHash, h, nil)
|
||||
}
|
||||
|
||||
// ObjectGet gets the object into the io.Writer contents.
|
||||
|
@ -1602,19 +1681,10 @@ type BulkDeleteResult struct {
|
|||
Headers Headers // Response HTTP headers.
|
||||
}
|
||||
|
||||
// BulkDelete deletes multiple objectNames from container in one operation.
|
||||
//
|
||||
// Some servers may not accept bulk-delete requests since bulk-delete is
|
||||
// an optional feature of swift - these will return the Forbidden error.
|
||||
//
|
||||
// See also:
|
||||
// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html
|
||||
// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html
|
||||
func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) {
|
||||
func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, err error) {
|
||||
var buffer bytes.Buffer
|
||||
for _, s := range objectNames {
|
||||
buffer.WriteString(fmt.Sprintf("/%s/%s\n", container,
|
||||
url.QueryEscape(s)))
|
||||
for _, s := range objects {
|
||||
buffer.WriteString(url.QueryEscape(s) + "\n")
|
||||
}
|
||||
resp, headers, err := c.storage(RequestOpts{
|
||||
Operation: "DELETE",
|
||||
|
@ -1655,6 +1725,22 @@ func (c *Connection) BulkDelete(container string, objectNames []string) (result
|
|||
return
|
||||
}
|
||||
|
||||
// BulkDelete deletes multiple objectNames from container in one operation.
|
||||
//
|
||||
// Some servers may not accept bulk-delete requests since bulk-delete is
|
||||
// an optional feature of swift - these will return the Forbidden error.
|
||||
//
|
||||
// See also:
|
||||
// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html
|
||||
// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html
|
||||
func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) {
|
||||
fullPaths := make([]string, len(objectNames))
|
||||
for i, name := range objectNames {
|
||||
fullPaths[i] = fmt.Sprintf("/%s/%s", container, name)
|
||||
}
|
||||
return c.doBulkDelete(fullPaths)
|
||||
}
|
||||
|
||||
// BulkUploadResult stores results of BulkUpload().
|
||||
//
|
||||
// Individual errors may (or may not) be returned by Errors.
|
||||
|
@ -1738,6 +1824,17 @@ func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format
|
|||
//
|
||||
// Use headers.ObjectMetadata() to read the metadata in the Headers.
|
||||
func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) {
|
||||
err = withLORetry(0, func() (Headers, int64, error) {
|
||||
info, headers, err = c.objectBase(container, objectName)
|
||||
if err != nil {
|
||||
return headers, 0, err
|
||||
}
|
||||
return headers, info.Bytes, nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Connection) objectBase(container string, objectName string) (info Object, headers Headers, err error) {
|
||||
var resp *http.Response
|
||||
resp, headers, err = c.storage(RequestOpts{
|
||||
Container: container,
|
||||
|
@ -1778,6 +1875,12 @@ func (c *Connection) Object(container string, objectName string) (info Object, h
|
|||
}
|
||||
|
||||
info.Hash = resp.Header.Get("Etag")
|
||||
if resp.Header.Get("X-Object-Manifest") != "" {
|
||||
info.ObjectType = DynamicLargeObjectType
|
||||
} else if resp.Header.Get("X-Static-Large-Object") != "" {
|
||||
info.ObjectType = StaticLargeObjectType
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
|
428
vendor/github.com/ncw/swift/swift_internal_test.go
generated
vendored
Normal file
428
vendor/github.com/ncw/swift/swift_internal_test.go
generated
vendored
Normal file
|
@ -0,0 +1,428 @@
|
|||
// This tests the swift package internals
|
||||
//
|
||||
// It does not require access to a swift server
|
||||
//
|
||||
// FIXME need to add more tests and to check URLs and parameters
|
||||
package swift
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
TEST_ADDRESS = "localhost:5324"
|
||||
AUTH_URL = "http://" + TEST_ADDRESS + "/v1.0"
|
||||
PROXY_URL = "http://" + TEST_ADDRESS + "/proxy"
|
||||
USERNAME = "test"
|
||||
APIKEY = "apikey"
|
||||
AUTH_TOKEN = "token"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
server *SwiftServer
|
||||
c *Connection
|
||||
)
|
||||
|
||||
// SwiftServer implements a test swift server
|
||||
type SwiftServer struct {
|
||||
t *testing.T
|
||||
checks []*Check
|
||||
}
|
||||
|
||||
// Used to check and reply to http transactions
|
||||
type Check struct {
|
||||
in Headers
|
||||
out Headers
|
||||
rx *string
|
||||
tx *string
|
||||
err *Error
|
||||
url *string
|
||||
}
|
||||
|
||||
// Add a in check
|
||||
func (check *Check) In(in Headers) *Check {
|
||||
check.in = in
|
||||
return check
|
||||
}
|
||||
|
||||
// Add an out check
|
||||
func (check *Check) Out(out Headers) *Check {
|
||||
check.out = out
|
||||
return check
|
||||
}
|
||||
|
||||
// Add an Error check
|
||||
func (check *Check) Error(StatusCode int, Text string) *Check {
|
||||
check.err = newError(StatusCode, Text)
|
||||
return check
|
||||
}
|
||||
|
||||
// Add a rx check
|
||||
func (check *Check) Rx(rx string) *Check {
|
||||
check.rx = &rx
|
||||
return check
|
||||
}
|
||||
|
||||
// Add an tx check
|
||||
func (check *Check) Tx(tx string) *Check {
|
||||
check.tx = &tx
|
||||
return check
|
||||
}
|
||||
|
||||
// Add an URL check
|
||||
func (check *Check) Url(url string) *Check {
|
||||
check.url = &url
|
||||
return check
|
||||
}
|
||||
|
||||
// Add a check
|
||||
func (s *SwiftServer) AddCheck(t *testing.T) *Check {
|
||||
server.t = t
|
||||
check := &Check{
|
||||
in: Headers{},
|
||||
out: Headers{},
|
||||
err: nil,
|
||||
}
|
||||
s.checks = append(s.checks, check)
|
||||
return check
|
||||
}
|
||||
|
||||
// Responds to a request
|
||||
func (s *SwiftServer) Respond(w http.ResponseWriter, r *http.Request) {
|
||||
if len(s.checks) < 1 {
|
||||
s.t.Fatal("Unexpected http transaction")
|
||||
}
|
||||
check := s.checks[0]
|
||||
s.checks = s.checks[1:]
|
||||
|
||||
// Check URL
|
||||
if check.url != nil && *check.url != r.URL.String() {
|
||||
s.t.Errorf("Expecting URL %q but got %q", *check.url, r.URL)
|
||||
}
|
||||
|
||||
// Check headers
|
||||
for k, v := range check.in {
|
||||
actual := r.Header.Get(k)
|
||||
if actual != v {
|
||||
s.t.Errorf("Expecting header %q=%q but got %q", k, v, actual)
|
||||
}
|
||||
}
|
||||
// Write output headers
|
||||
h := w.Header()
|
||||
for k, v := range check.out {
|
||||
h.Set(k, v)
|
||||
}
|
||||
// Return an error if required
|
||||
if check.err != nil {
|
||||
http.Error(w, check.err.Text, check.err.StatusCode)
|
||||
} else {
|
||||
if check.tx != nil {
|
||||
_, err := w.Write([]byte(*check.tx))
|
||||
if err != nil {
|
||||
s.t.Error("Write failed", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Checks to see all responses are used up
|
||||
func (s *SwiftServer) Finished() {
|
||||
if len(s.checks) > 0 {
|
||||
s.t.Error("Unused checks", s.checks)
|
||||
}
|
||||
}
|
||||
|
||||
func handle(w http.ResponseWriter, r *http.Request) {
|
||||
// out, _ := httputil.DumpRequest(r, true)
|
||||
// os.Stdout.Write(out)
|
||||
server.Respond(w, r)
|
||||
}
|
||||
|
||||
func NewSwiftServer() *SwiftServer {
|
||||
server := &SwiftServer{}
|
||||
http.HandleFunc("/", handle)
|
||||
go http.ListenAndServe(TEST_ADDRESS, nil)
|
||||
fmt.Print("Waiting for server to start ")
|
||||
for {
|
||||
fmt.Print(".")
|
||||
conn, err := net.Dial("tcp", TEST_ADDRESS)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
fmt.Println(" Started")
|
||||
break
|
||||
}
|
||||
}
|
||||
return server
|
||||
}
|
||||
|
||||
func init() {
|
||||
server = NewSwiftServer()
|
||||
c = &Connection{
|
||||
UserName: USERNAME,
|
||||
ApiKey: APIKEY,
|
||||
AuthUrl: AUTH_URL,
|
||||
}
|
||||
}
|
||||
|
||||
// Check the error is a swift error
|
||||
func checkError(t *testing.T, err error, StatusCode int, Text string) {
|
||||
if err == nil {
|
||||
t.Fatal("No error returned")
|
||||
}
|
||||
err2, ok := err.(*Error)
|
||||
if !ok {
|
||||
t.Fatal("Bad error type")
|
||||
}
|
||||
if err2.StatusCode != StatusCode {
|
||||
t.Fatalf("Bad status code, expecting %d got %d", StatusCode, err2.StatusCode)
|
||||
}
|
||||
if err2.Text != Text {
|
||||
t.Fatalf("Bad error string, expecting %q got %q", Text, err2.Text)
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME copied from swift_test.go
|
||||
func compareMaps(t *testing.T, a, b map[string]string) {
|
||||
if len(a) != len(b) {
|
||||
t.Error("Maps different sizes", a, b)
|
||||
}
|
||||
for ka, va := range a {
|
||||
if vb, ok := b[ka]; !ok || va != vb {
|
||||
t.Error("Difference in key", ka, va, b[ka])
|
||||
}
|
||||
}
|
||||
for kb, vb := range b {
|
||||
if va, ok := a[kb]; !ok || vb != va {
|
||||
t.Error("Difference in key", kb, vb, a[kb])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalError(t *testing.T) {
|
||||
e := newError(404, "Not Found!")
|
||||
if e.StatusCode != 404 || e.Text != "Not Found!" {
|
||||
t.Fatal("Bad error")
|
||||
}
|
||||
if e.Error() != "Not Found!" {
|
||||
t.Fatal("Bad error")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func testCheckClose(c io.Closer, e error) (err error) {
|
||||
err = e
|
||||
defer checkClose(c, &err)
|
||||
return
|
||||
}
|
||||
|
||||
// Make a closer which returns the error of our choice
|
||||
type myCloser struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *myCloser) Close() error {
|
||||
return c.err
|
||||
}
|
||||
|
||||
func TestInternalCheckClose(t *testing.T) {
|
||||
if testCheckClose(&myCloser{nil}, nil) != nil {
|
||||
t.Fatal("bad 1")
|
||||
}
|
||||
if testCheckClose(&myCloser{nil}, ObjectCorrupted) != ObjectCorrupted {
|
||||
t.Fatal("bad 2")
|
||||
}
|
||||
if testCheckClose(&myCloser{ObjectNotFound}, nil) != ObjectNotFound {
|
||||
t.Fatal("bad 3")
|
||||
}
|
||||
if testCheckClose(&myCloser{ObjectNotFound}, ObjectCorrupted) != ObjectCorrupted {
|
||||
t.Fatal("bad 4")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalParseHeaders(t *testing.T) {
|
||||
resp := &http.Response{StatusCode: 200}
|
||||
if c.parseHeaders(resp, nil) != nil {
|
||||
t.Error("Bad 1")
|
||||
}
|
||||
if c.parseHeaders(resp, authErrorMap) != nil {
|
||||
t.Error("Bad 1")
|
||||
}
|
||||
|
||||
resp = &http.Response{StatusCode: 299}
|
||||
if c.parseHeaders(resp, nil) != nil {
|
||||
t.Error("Bad 1")
|
||||
}
|
||||
|
||||
resp = &http.Response{StatusCode: 199, Status: "BOOM"}
|
||||
checkError(t, c.parseHeaders(resp, nil), 199, "HTTP Error: 199: BOOM")
|
||||
|
||||
resp = &http.Response{StatusCode: 300, Status: "BOOM"}
|
||||
checkError(t, c.parseHeaders(resp, nil), 300, "HTTP Error: 300: BOOM")
|
||||
|
||||
resp = &http.Response{StatusCode: 404, Status: "BOOM"}
|
||||
checkError(t, c.parseHeaders(resp, nil), 404, "HTTP Error: 404: BOOM")
|
||||
if c.parseHeaders(resp, ContainerErrorMap) != ContainerNotFound {
|
||||
t.Error("Bad 1")
|
||||
}
|
||||
if c.parseHeaders(resp, objectErrorMap) != ObjectNotFound {
|
||||
t.Error("Bad 1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalReadHeaders(t *testing.T) {
|
||||
resp := &http.Response{Header: http.Header{}}
|
||||
compareMaps(t, readHeaders(resp), Headers{})
|
||||
|
||||
resp = &http.Response{Header: http.Header{
|
||||
"one": []string{"1"},
|
||||
"two": []string{"2"},
|
||||
}}
|
||||
compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"})
|
||||
|
||||
// FIXME this outputs a log which we should test and check
|
||||
resp = &http.Response{Header: http.Header{
|
||||
"one": []string{"1", "11", "111"},
|
||||
"two": []string{"2"},
|
||||
}}
|
||||
compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"})
|
||||
}
|
||||
|
||||
func TestInternalStorage(t *testing.T) {
|
||||
// FIXME
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
func TestInternalAuthenticate(t *testing.T) {
|
||||
server.AddCheck(t).In(Headers{
|
||||
"User-Agent": DefaultUserAgent,
|
||||
"X-Auth-Key": APIKEY,
|
||||
"X-Auth-User": USERNAME,
|
||||
}).Out(Headers{
|
||||
"X-Storage-Url": PROXY_URL,
|
||||
"X-Auth-Token": AUTH_TOKEN,
|
||||
}).Url("/v1.0")
|
||||
defer server.Finished()
|
||||
|
||||
err := c.Authenticate()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c.StorageUrl != PROXY_URL {
|
||||
t.Error("Bad storage url")
|
||||
}
|
||||
if c.AuthToken != AUTH_TOKEN {
|
||||
t.Error("Bad auth token")
|
||||
}
|
||||
if !c.Authenticated() {
|
||||
t.Error("Didn't authenticate")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalAuthenticateDenied(t *testing.T) {
|
||||
server.AddCheck(t).Error(400, "Bad request")
|
||||
server.AddCheck(t).Error(401, "DENIED")
|
||||
defer server.Finished()
|
||||
c.UnAuthenticate()
|
||||
err := c.Authenticate()
|
||||
if err != AuthorizationFailed {
|
||||
t.Fatal("Expecting AuthorizationFailed", err)
|
||||
}
|
||||
// FIXME
|
||||
// if c.Authenticated() {
|
||||
// t.Fatal("Expecting not authenticated")
|
||||
// }
|
||||
}
|
||||
|
||||
func TestInternalAuthenticateBad(t *testing.T) {
|
||||
server.AddCheck(t).Out(Headers{
|
||||
"X-Storage-Url": PROXY_URL,
|
||||
})
|
||||
defer server.Finished()
|
||||
err := c.Authenticate()
|
||||
checkError(t, err, 0, "Response didn't have storage url and auth token")
|
||||
if c.Authenticated() {
|
||||
t.Fatal("Expecting not authenticated")
|
||||
}
|
||||
|
||||
server.AddCheck(t).Out(Headers{
|
||||
"X-Auth-Token": AUTH_TOKEN,
|
||||
})
|
||||
err = c.Authenticate()
|
||||
checkError(t, err, 0, "Response didn't have storage url and auth token")
|
||||
if c.Authenticated() {
|
||||
t.Fatal("Expecting not authenticated")
|
||||
}
|
||||
|
||||
server.AddCheck(t)
|
||||
err = c.Authenticate()
|
||||
checkError(t, err, 0, "Response didn't have storage url and auth token")
|
||||
if c.Authenticated() {
|
||||
t.Fatal("Expecting not authenticated")
|
||||
}
|
||||
|
||||
server.AddCheck(t).Out(Headers{
|
||||
"X-Storage-Url": PROXY_URL,
|
||||
"X-Auth-Token": AUTH_TOKEN,
|
||||
})
|
||||
err = c.Authenticate()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !c.Authenticated() {
|
||||
t.Fatal("Expecting authenticated")
|
||||
}
|
||||
}
|
||||
|
||||
func testContainerNames(t *testing.T, rx string, expected []string) {
|
||||
server.AddCheck(t).In(Headers{
|
||||
"User-Agent": DefaultUserAgent,
|
||||
"X-Auth-Token": AUTH_TOKEN,
|
||||
}).Tx(rx).Url("/proxy")
|
||||
containers, err := c.ContainerNames(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(containers) != len(expected) {
|
||||
t.Fatal("Wrong number of containers", len(containers), rx, len(expected), expected)
|
||||
}
|
||||
for i := range containers {
|
||||
if containers[i] != expected[i] {
|
||||
t.Error("Bad container", containers[i], expected[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestInternalContainerNames(t *testing.T) {
|
||||
defer server.Finished()
|
||||
testContainerNames(t, "", []string{})
|
||||
testContainerNames(t, "one", []string{"one"})
|
||||
testContainerNames(t, "one\n", []string{"one"})
|
||||
testContainerNames(t, "one\ntwo\nthree\n", []string{"one", "two", "three"})
|
||||
}
|
||||
|
||||
func TestInternalObjectPutBytes(t *testing.T) {
|
||||
server.AddCheck(t).In(Headers{
|
||||
"User-Agent": DefaultUserAgent,
|
||||
"X-Auth-Token": AUTH_TOKEN,
|
||||
"Content-Length": "5",
|
||||
"Content-Type": "text/plain",
|
||||
}).Rx("12345")
|
||||
defer server.Finished()
|
||||
c.ObjectPutBytes("container", "object", []byte{'1', '2', '3', '4', '5'}, "text/plain")
|
||||
}
|
||||
|
||||
func TestInternalObjectPutString(t *testing.T) {
|
||||
server.AddCheck(t).In(Headers{
|
||||
"User-Agent": DefaultUserAgent,
|
||||
"X-Auth-Token": AUTH_TOKEN,
|
||||
"Content-Length": "5",
|
||||
"Content-Type": "text/plain",
|
||||
}).Rx("12345")
|
||||
defer server.Finished()
|
||||
c.ObjectPutString("container", "object", "12345", "text/plain")
|
||||
}
|
2891
vendor/github.com/ncw/swift/swift_test.go
generated
vendored
Normal file
2891
vendor/github.com/ncw/swift/swift_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
1094
vendor/github.com/ncw/swift/swifttest/server.go
generated
vendored
Normal file
1094
vendor/github.com/ncw/swift/swifttest/server.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
107
vendor/github.com/ncw/swift/timeout_reader_test.go
generated
vendored
Normal file
107
vendor/github.com/ncw/swift/timeout_reader_test.go
generated
vendored
Normal file
|
@ -0,0 +1,107 @@
|
|||
// This tests TimeoutReader
|
||||
|
||||
package swift
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An io.ReadCloser for testing
|
||||
type testReader struct {
|
||||
sync.Mutex
|
||||
n int
|
||||
delay time.Duration
|
||||
closed bool
|
||||
}
|
||||
|
||||
// Returns n bytes with at time.Duration delay
|
||||
func newTestReader(n int, delay time.Duration) *testReader {
|
||||
return &testReader{
|
||||
n: n,
|
||||
delay: delay,
|
||||
}
|
||||
}
|
||||
|
||||
// Returns 1 byte at a time after delay
|
||||
func (t *testReader) Read(p []byte) (n int, err error) {
|
||||
if t.n <= 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
time.Sleep(t.delay)
|
||||
p[0] = 'A'
|
||||
t.Lock()
|
||||
t.n--
|
||||
t.Unlock()
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
// Close the channel
|
||||
func (t *testReader) Close() error {
|
||||
t.Lock()
|
||||
t.closed = true
|
||||
t.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestTimeoutReaderNoTimeout(t *testing.T) {
|
||||
test := newTestReader(3, 10*time.Millisecond)
|
||||
cancelled := false
|
||||
cancel := func() {
|
||||
cancelled = true
|
||||
}
|
||||
tr := newTimeoutReader(test, 100*time.Millisecond, cancel)
|
||||
b, err := ioutil.ReadAll(tr)
|
||||
if err != nil || string(b) != "AAA" {
|
||||
t.Fatalf("Bad read %s %s", err, b)
|
||||
}
|
||||
if cancelled {
|
||||
t.Fatal("Cancelled when shouldn't have been")
|
||||
}
|
||||
if test.n != 0 {
|
||||
t.Fatal("Didn't read all")
|
||||
}
|
||||
if test.closed {
|
||||
t.Fatal("Shouldn't be closed")
|
||||
}
|
||||
tr.Close()
|
||||
if !test.closed {
|
||||
t.Fatal("Should be closed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeoutReaderTimeout(t *testing.T) {
|
||||
// Return those bytes slowly so we get an idle timeout
|
||||
test := newTestReader(3, 100*time.Millisecond)
|
||||
cancelled := false
|
||||
cancel := func() {
|
||||
cancelled = true
|
||||
}
|
||||
tr := newTimeoutReader(test, 10*time.Millisecond, cancel)
|
||||
_, err := ioutil.ReadAll(tr)
|
||||
if err != TimeoutError {
|
||||
t.Fatal("Expecting TimeoutError, got", err)
|
||||
}
|
||||
if !cancelled {
|
||||
t.Fatal("Not cancelled when should have been")
|
||||
}
|
||||
test.Lock()
|
||||
n := test.n
|
||||
test.Unlock()
|
||||
if n == 0 {
|
||||
t.Fatal("Read all")
|
||||
}
|
||||
if n != 3 {
|
||||
t.Fatal("Didn't read any")
|
||||
}
|
||||
if test.closed {
|
||||
t.Fatal("Shouldn't be closed")
|
||||
}
|
||||
tr.Close()
|
||||
if !test.closed {
|
||||
t.Fatal("Should be closed")
|
||||
}
|
||||
}
|
22
vendor/github.com/ncw/swift/travis_realserver.sh
generated
vendored
Executable file
22
vendor/github.com/ncw/swift/travis_realserver.sh
generated
vendored
Executable file
|
@ -0,0 +1,22 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
if [ ! "${TRAVIS_BRANCH}" = "master" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then
|
||||
echo "Running tests pointing to Rackspace"
|
||||
export SWIFT_API_KEY=$RACKSPACE_APIKEY
|
||||
export SWIFT_API_USER=$RACKSPACE_USER
|
||||
export SWIFT_AUTH_URL=$RACKSPACE_AUTH
|
||||
go test ./...
|
||||
fi
|
||||
|
||||
if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! -z "${MEMSET_APIKEY}" ]; then
|
||||
echo "Running tests pointing to Memset"
|
||||
export SWIFT_API_KEY=$MEMSET_APIKEY
|
||||
export SWIFT_API_USER=$MEMSET_USER
|
||||
export SWIFT_AUTH_URL=$MEMSET_AUTH
|
||||
go test
|
||||
fi
|
43
vendor/github.com/ncw/swift/watchdog_reader.go
generated
vendored
43
vendor/github.com/ncw/swift/watchdog_reader.go
generated
vendored
|
@ -5,29 +5,50 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
var watchdogChunkSize = 1 << 20 // 1 MiB
|
||||
|
||||
// An io.Reader which resets a watchdog timer whenever data is read
|
||||
type watchdogReader struct {
|
||||
timeout time.Duration
|
||||
reader io.Reader
|
||||
timer *time.Timer
|
||||
timeout time.Duration
|
||||
reader io.Reader
|
||||
timer *time.Timer
|
||||
chunkSize int
|
||||
}
|
||||
|
||||
// Returns a new reader which will kick the watchdog timer whenever data is read
|
||||
func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader {
|
||||
return &watchdogReader{
|
||||
timeout: timeout,
|
||||
reader: reader,
|
||||
timer: timer,
|
||||
timeout: timeout,
|
||||
reader: reader,
|
||||
timer: timer,
|
||||
chunkSize: watchdogChunkSize,
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads up to len(p) bytes into p
|
||||
func (t *watchdogReader) Read(p []byte) (n int, err error) {
|
||||
// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
|
||||
func (t *watchdogReader) Read(p []byte) (int, error) {
|
||||
//read from underlying reader in chunks not larger than t.chunkSize
|
||||
//while resetting the watchdog timer before every read; the small chunk
|
||||
//size ensures that the timer does not fire when reading a large amount of
|
||||
//data from a slow connection
|
||||
start := 0
|
||||
end := len(p)
|
||||
for start < end {
|
||||
length := end - start
|
||||
if length > t.chunkSize {
|
||||
length = t.chunkSize
|
||||
}
|
||||
|
||||
resetTimer(t.timer, t.timeout)
|
||||
n, err := t.reader.Read(p[start : start+length])
|
||||
start += n
|
||||
if n == 0 || err != nil {
|
||||
return start, err
|
||||
}
|
||||
}
|
||||
|
||||
resetTimer(t.timer, t.timeout)
|
||||
n, err = t.reader.Read(p)
|
||||
resetTimer(t.timer, t.timeout)
|
||||
return
|
||||
return start, nil
|
||||
}
|
||||
|
||||
// Check it satisfies the interface
|
||||
|
|
137
vendor/github.com/ncw/swift/watchdog_reader_test.go
generated
vendored
Normal file
137
vendor/github.com/ncw/swift/watchdog_reader_test.go
generated
vendored
Normal file
|
@ -0,0 +1,137 @@
|
|||
// This tests WatchdogReader
|
||||
|
||||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Uses testReader from timeout_reader_test.go
|
||||
|
||||
func testWatchdogReaderTimeout(t *testing.T, initialTimeout, watchdogTimeout time.Duration, expectedTimeout bool) {
|
||||
test := newTestReader(3, 10*time.Millisecond)
|
||||
timer, firedChan := setupTimer(initialTimeout)
|
||||
wr := newWatchdogReader(test, watchdogTimeout, timer)
|
||||
b, err := ioutil.ReadAll(wr)
|
||||
if err != nil || string(b) != "AAA" {
|
||||
t.Fatalf("Bad read %s %s", err, b)
|
||||
}
|
||||
checkTimer(t, firedChan, expectedTimeout)
|
||||
}
|
||||
|
||||
func setupTimer(initialTimeout time.Duration) (timer *time.Timer, fired <-chan bool) {
|
||||
timer = time.NewTimer(initialTimeout)
|
||||
firedChan := make(chan bool)
|
||||
started := make(chan bool)
|
||||
go func() {
|
||||
started <- true
|
||||
select {
|
||||
case <-timer.C:
|
||||
firedChan <- true
|
||||
}
|
||||
}()
|
||||
<-started
|
||||
return timer, firedChan
|
||||
}
|
||||
|
||||
func checkTimer(t *testing.T, firedChan <-chan bool, expectedTimeout bool) {
|
||||
fired := false
|
||||
select {
|
||||
case fired = <-firedChan:
|
||||
default:
|
||||
}
|
||||
if expectedTimeout {
|
||||
if !fired {
|
||||
t.Fatal("Timer should have fired")
|
||||
}
|
||||
} else {
|
||||
if fired {
|
||||
t.Fatal("Timer should not have fired")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatchdogReaderNoTimeout(t *testing.T) {
|
||||
testWatchdogReaderTimeout(t, 100*time.Millisecond, 100*time.Millisecond, false)
|
||||
}
|
||||
|
||||
func TestWatchdogReaderTimeout(t *testing.T) {
|
||||
testWatchdogReaderTimeout(t, 5*time.Millisecond, 5*time.Millisecond, true)
|
||||
}
|
||||
|
||||
func TestWatchdogReaderNoTimeoutShortInitial(t *testing.T) {
|
||||
testWatchdogReaderTimeout(t, 5*time.Millisecond, 100*time.Millisecond, false)
|
||||
}
|
||||
|
||||
func TestWatchdogReaderTimeoutLongInitial(t *testing.T) {
|
||||
testWatchdogReaderTimeout(t, 100*time.Millisecond, 5*time.Millisecond, true)
|
||||
}
|
||||
|
||||
//slowReader simulates reading from a slow network connection by introducing a delay
|
||||
//in each Read() proportional to the amount of bytes read.
|
||||
type slowReader struct {
|
||||
reader io.Reader
|
||||
delayPerByte time.Duration
|
||||
}
|
||||
|
||||
func (r *slowReader) Read(p []byte) (n int, err error) {
|
||||
n, err = r.reader.Read(p)
|
||||
if n > 0 {
|
||||
time.Sleep(time.Duration(n) * r.delayPerByte)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//This test verifies that the watchdogReader's timeout is not triggered by data
|
||||
//that comes in very slowly. (It should only be triggered if no data arrives at
|
||||
//all.)
|
||||
func TestWatchdogReaderOnSlowNetwork(t *testing.T) {
|
||||
byteString := make([]byte, 8*watchdogChunkSize)
|
||||
reader := &slowReader{
|
||||
reader: bytes.NewReader(byteString),
|
||||
//reading everything at once would take 100 ms, which is longer than the
|
||||
//watchdog timeout below
|
||||
delayPerByte: 200 * time.Millisecond / time.Duration(len(byteString)),
|
||||
}
|
||||
|
||||
timer, firedChan := setupTimer(10 * time.Millisecond)
|
||||
wr := newWatchdogReader(reader, 190*time.Millisecond, timer)
|
||||
|
||||
//use io.ReadFull instead of ioutil.ReadAll here because ReadAll already does
|
||||
//some chunking that would keep this testcase from failing
|
||||
b := make([]byte, len(byteString))
|
||||
n, err := io.ReadFull(wr, b)
|
||||
if err != nil || n != len(b) || !bytes.Equal(b, byteString) {
|
||||
t.Fatal("Bad read %s %d", err, n)
|
||||
}
|
||||
|
||||
checkTimer(t, firedChan, false)
|
||||
}
|
||||
|
||||
//This test verifies that the watchdogReader's chunking logic does not mess up
|
||||
//the byte strings that are read.
|
||||
func TestWatchdogReaderValidity(t *testing.T) {
|
||||
byteString := []byte("abcdefghij")
|
||||
//make a reader with a non-standard chunk size (1 MiB would be much too huge
|
||||
//to comfortably look at the bytestring that comes out of the reader)
|
||||
wr := &watchdogReader{
|
||||
reader: bytes.NewReader(byteString),
|
||||
chunkSize: 3, //len(byteString) % chunkSize != 0 to be extra rude :)
|
||||
//don't care about the timeout stuff here
|
||||
timeout: 5 * time.Minute,
|
||||
timer: time.NewTimer(5 * time.Minute),
|
||||
}
|
||||
|
||||
b := make([]byte, len(byteString))
|
||||
n, err := io.ReadFull(wr, b)
|
||||
if err != nil || n != len(b) {
|
||||
t.Fatal("Read error: %s", err)
|
||||
}
|
||||
if !bytes.Equal(b, byteString) {
|
||||
t.Fatal("Bad read: %#v != %#v", string(b), string(byteString))
|
||||
}
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue